/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *,
			    const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
volatile VNET_DEFINE_STATIC(int, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}
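/*
 * Usage sketch (added for illustration; mirrors the copies performed for
 * ioctl arguments later in this file, e.g. pf_state_kill_to_kstate_kill()):
 *
 *	error = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
 *	    sizeof(kill->psk_ifname));
 *	if (error != 0)
 *		return (error);		EINVAL: no NUL within sz bytes
 *
 * Unlike a bare strlcpy(), this refuses over-long user input instead of
 * silently truncating it.
 */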
293 */ 294 static int 295 pf_user_strcpy(char *dst, const char *src, size_t sz) 296 { 297 if (strnlen(src, sz) == sz) 298 return (EINVAL); 299 (void)strlcpy(dst, src, sz); 300 return (0); 301 } 302 303 static void 304 pfattach_vnet(void) 305 { 306 u_int32_t *my_timeout = V_pf_default_rule.timeout; 307 308 pf_initialize(); 309 pfr_initialize(); 310 pfi_initialize_vnet(); 311 pf_normalize_init(); 312 pf_syncookies_init(); 313 314 V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; 315 V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; 316 317 RB_INIT(&V_pf_anchors); 318 pf_init_kruleset(&pf_main_ruleset); 319 320 pf_init_keth(V_pf_keth); 321 322 /* default rule should never be garbage collected */ 323 V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next; 324 #ifdef PF_DEFAULT_TO_DROP 325 V_pf_default_rule.action = PF_DROP; 326 #else 327 V_pf_default_rule.action = PF_PASS; 328 #endif 329 V_pf_default_rule.nr = -1; 330 V_pf_default_rule.rtableid = -1; 331 332 pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK); 333 for (int i = 0; i < 2; i++) { 334 pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK); 335 pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK); 336 } 337 V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK); 338 V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK); 339 V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK); 340 341 #ifdef PF_WANT_32_TO_64_COUNTER 342 V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO); 343 V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO); 344 PF_RULES_WLOCK(); 345 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist); 346 LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist); 347 V_pf_allrulecount++; 348 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist); 349 PF_RULES_WUNLOCK(); 350 #endif 351 352 /* initialize default timeouts */ 353 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; 354 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; 355 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; 356 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; 357 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; 358 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; 359 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; 360 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; 361 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; 362 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; 363 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; 364 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; 365 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; 366 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; 367 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL; 368 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; 369 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; 370 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; 371 my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; 372 my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; 373 374 bzero(&V_pf_status, sizeof(V_pf_status)); 375 V_pf_status.debug = PF_DEBUG_URGENT; 376 377 V_pf_pfil_hooked = 0; 378 V_pf_pfil_eth_hooked = 0; 379 380 /* XXX do our best to avoid a conflict */ 381 V_pf_status.hostid = arc4random(); 382 383 for (int i = 0; i < PFRES_MAX; i++) 384 V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK); 385 for (int i = 0; i < KLCNT_MAX; i++) 386 
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}
void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
657 */ 658 if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) 659 return (0); 660 661 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ 662 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); 663 664 /* allocate and fill new struct pf_tagname */ 665 tag = uma_zalloc(V_pf_tag_z, M_NOWAIT); 666 if (tag == NULL) 667 return (0); 668 strlcpy(tag->name, tagname, sizeof(tag->name)); 669 tag->tag = new_tagid; 670 tag->ref = 1; 671 672 /* Insert into namehash */ 673 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); 674 675 /* Insert into taghash */ 676 index = tag2hashindex(ts, new_tagid); 677 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); 678 679 return (tag->tag); 680 } 681 682 static void 683 tag_unref(struct pf_tagset *ts, u_int16_t tag) 684 { 685 struct pf_tagname *t; 686 uint16_t index; 687 688 PF_RULES_WASSERT(); 689 690 index = tag2hashindex(ts, tag); 691 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) 692 if (tag == t->tag) { 693 if (--t->ref == 0) { 694 TAILQ_REMOVE(&ts->taghash[index], t, 695 taghash_entries); 696 index = tagname2hashindex(ts, t->name); 697 TAILQ_REMOVE(&ts->namehash[index], t, 698 namehash_entries); 699 /* Bits are 0-based for BIT_SET() */ 700 BIT_SET(TAGID_MAX, tag - 1, &ts->avail); 701 uma_zfree(V_pf_tag_z, t); 702 } 703 break; 704 } 705 } 706 707 static uint16_t 708 pf_tagname2tag(const char *tagname) 709 { 710 return (tagname2tag(&V_pf_tags, tagname)); 711 } 712 713 static int 714 pf_begin_eth(uint32_t *ticket, const char *anchor) 715 { 716 struct pf_keth_rule *rule, *tmp; 717 struct pf_keth_ruleset *rs; 718 719 PF_RULES_WASSERT(); 720 721 rs = pf_find_or_create_keth_ruleset(anchor); 722 if (rs == NULL) 723 return (EINVAL); 724 725 if (rs->inactive.open) 726 /* We may be waiting for NET_EPOCH_CALL(pf_rollback_eth_cb) to 727 * finish. */ 728 return (EBUSY); 729 730 /* Purge old inactive rules. */ 731 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, 732 tmp) { 733 TAILQ_REMOVE(rs->inactive.rules, rule, 734 entries); 735 pf_free_eth_rule(rule); 736 } 737 738 *ticket = ++rs->inactive.ticket; 739 rs->inactive.open = 1; 740 741 return (0); 742 } 743 744 static void 745 pf_rollback_eth_cb(struct epoch_context *ctx) 746 { 747 struct pf_keth_ruleset *rs; 748 749 rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx); 750 751 CURVNET_SET(rs->vnet); 752 753 PF_RULES_WLOCK(); 754 pf_rollback_eth(rs->inactive.ticket, 755 rs->anchor ? rs->anchor->path : ""); 756 PF_RULES_WUNLOCK(); 757 758 CURVNET_RESTORE(); 759 } 760 761 static int 762 pf_rollback_eth(uint32_t ticket, const char *anchor) 763 { 764 struct pf_keth_rule *rule, *tmp; 765 struct pf_keth_ruleset *rs; 766 767 PF_RULES_WASSERT(); 768 769 rs = pf_find_keth_ruleset(anchor); 770 if (rs == NULL) 771 return (EINVAL); 772 773 if (!rs->inactive.open || 774 ticket != rs->inactive.ticket) 775 return (0); 776 777 /* Purge old inactive rules. 
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (rs->inactive.open)
		/* We may be waiting for NET_EPOCH_CALL(pf_rollback_eth_cb) to
		 * finish. */
		return (EBUSY);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}
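/*
 * Note (added commentary): pf_commit_eth() publishes the new ruleset with
 * a single ck_pr_store_ptr() and defers destruction of the previously
 * active list to pf_rollback_eth_cb() via NET_EPOCH_CALL().  Readers
 * traverse rs->active.rules under the net epoch without holding the rules
 * lock, so the old list may only be freed once the epoch guarantees no
 * reader still holds a pointer into it.  This is also why pf_begin_eth()
 * returns EBUSY while inactive.open is set: the epoch callback has not
 * run yet and the inactive list is still in flight.
 */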
#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one.  if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
1075 */ 1076 if (!ALTQ_IS_READY(&ifp->if_snd)) 1077 return; 1078 1079 /* Interrupt userland queue modifications */ 1080 if (V_altqs_inactive_open) 1081 pf_rollback_altq(V_ticket_altqs_inactive); 1082 1083 /* Start new altq ruleset */ 1084 if (pf_begin_altq(&ticket)) 1085 return; 1086 1087 /* Copy the current active set */ 1088 TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) { 1089 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1090 if (a2 == NULL) { 1091 error = ENOMEM; 1092 break; 1093 } 1094 bcopy(a1, a2, sizeof(struct pf_altq)); 1095 1096 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1097 if (error) 1098 break; 1099 1100 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries); 1101 } 1102 if (error) 1103 goto out; 1104 TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { 1105 a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT); 1106 if (a2 == NULL) { 1107 error = ENOMEM; 1108 break; 1109 } 1110 bcopy(a1, a2, sizeof(struct pf_altq)); 1111 1112 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 1113 error = EBUSY; 1114 free(a2, M_PFALTQ); 1115 break; 1116 } 1117 a2->altq_disc = NULL; 1118 TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) { 1119 if (strncmp(a3->ifname, a2->ifname, 1120 IFNAMSIZ) == 0) { 1121 a2->altq_disc = a3->altq_disc; 1122 break; 1123 } 1124 } 1125 error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2); 1126 if (error) 1127 break; 1128 1129 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries); 1130 } 1131 1132 out: 1133 if (error != 0) 1134 pf_rollback_altq(ticket); 1135 else 1136 pf_commit_altq(ticket); 1137 } 1138 #endif /* ALTQ */ 1139 1140 static int 1141 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) 1142 { 1143 struct pf_kruleset *rs; 1144 struct pf_krule *rule; 1145 1146 PF_RULES_WASSERT(); 1147 1148 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1149 return (EINVAL); 1150 rs = pf_find_or_create_kruleset(anchor); 1151 if (rs == NULL) 1152 return (EINVAL); 1153 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1154 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1155 rs->rules[rs_num].inactive.rcount--; 1156 } 1157 *ticket = ++rs->rules[rs_num].inactive.ticket; 1158 rs->rules[rs_num].inactive.open = 1; 1159 return (0); 1160 } 1161 1162 static int 1163 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) 1164 { 1165 struct pf_kruleset *rs; 1166 struct pf_krule *rule; 1167 1168 PF_RULES_WASSERT(); 1169 1170 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) 1171 return (EINVAL); 1172 rs = pf_find_kruleset(anchor); 1173 if (rs == NULL || !rs->rules[rs_num].inactive.open || 1174 rs->rules[rs_num].inactive.ticket != ticket) 1175 return (0); 1176 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { 1177 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); 1178 rs->rules[rs_num].inactive.rcount--; 1179 } 1180 rs->rules[rs_num].inactive.open = 0; 1181 return (0); 1182 } 1183 1184 #define PF_MD5_UPD(st, elm) \ 1185 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm)) 1186 1187 #define PF_MD5_UPD_STR(st, elm) \ 1188 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm)) 1189 1190 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ 1191 (stor) = htonl((st)->elm); \ 1192 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\ 1193 } while (0) 1194 1195 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ 1196 (stor) = htons((st)->elm); \ 1197 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\ 1198 } while (0) 1199 1200 static void 1201 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) 1202 { 1203 
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{

	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static bool
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{
	MD5_CTX		ctx[2];
	u_int8_t	digest[2][PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx[0]);
	MD5Init(&ctx[1]);
	pf_hash_rule(&ctx[0], a);
	pf_hash_rule(&ctx[1], b);
	MD5Final(digest[0], &ctx[0]);
	MD5Final(digest[1], &ctx[1]);

	return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
}
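/*
 * Note (added commentary): pf_krule_compare() compares MD5 digests rather
 * than the structures themselves.  Because pf_hash_rule() only feeds in
 * the configuration-visible fields, kernel-private state such as counters
 * and kif pointers is ignored, so a reloaded rule that is textually
 * identical to its predecessor still compares equal.  The keep_counters
 * path in pf_commit_rules() below relies on exactly this.
 */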
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && ! pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				pf_counter_u64_critical_enter();
				pf_counter_u64_add_protected(&rule->evaluations,
				    pf_counter_u64_fetch(&tail->evaluations));
				pf_counter_u64_add_protected(&rule->packets[0],
				    pf_counter_u64_fetch(&tail->packets[0]));
				pf_counter_u64_add_protected(&rule->packets[1],
				    pf_counter_u64_fetch(&tail->packets[1]));
				pf_counter_u64_add_protected(&rule->bytes[0],
				    pf_counter_u64_fetch(&tail->bytes[0]));
				pf_counter_u64_add_protected(&rule->bytes[1],
				    pf_counter_u64_fetch(&tail->bytes[1]));
				pf_counter_u64_critical_exit();
			}
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
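/*
 * Worked example (added commentary): the adjustment above implements a
 * linear decay of the connection-rate counter over its measurement
 * window; with seconds = 10, count = 40 and a last update 4 seconds ago
 * (diff = 4), count - count * diff / seconds = 40 - 16 = 24.  Note that
 * in this revision "out" was just zeroed by bzero(), so out->expire and
 * out->conn_rate.count are not seeded from "in" before the adjustment
 * runs.
 */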
1482 */ 1483 static int 1484 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size) 1485 { 1486 u_int32_t version; 1487 1488 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1489 version = 0; 1490 else 1491 version = pa->version; 1492 1493 if (version > PFIOC_ALTQ_VERSION) 1494 return (EINVAL); 1495 1496 #define ASSIGN(x) exported_q->x = q->x 1497 #define COPY(x) \ 1498 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) 1499 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX) 1500 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX) 1501 1502 switch (version) { 1503 case 0: { 1504 struct pf_altq_v0 *exported_q = 1505 &((struct pfioc_altq_v0 *)pa)->altq; 1506 1507 COPY(ifname); 1508 1509 ASSIGN(scheduler); 1510 ASSIGN(tbrsize); 1511 exported_q->tbrsize = SATU16(q->tbrsize); 1512 exported_q->ifbandwidth = SATU32(q->ifbandwidth); 1513 1514 COPY(qname); 1515 COPY(parent); 1516 ASSIGN(parent_qid); 1517 exported_q->bandwidth = SATU32(q->bandwidth); 1518 ASSIGN(priority); 1519 ASSIGN(local_flags); 1520 1521 ASSIGN(qlimit); 1522 ASSIGN(flags); 1523 1524 if (q->scheduler == ALTQT_HFSC) { 1525 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x 1526 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ 1527 SATU32(q->pq_u.hfsc_opts.x) 1528 1529 ASSIGN_OPT_SATU32(rtsc_m1); 1530 ASSIGN_OPT(rtsc_d); 1531 ASSIGN_OPT_SATU32(rtsc_m2); 1532 1533 ASSIGN_OPT_SATU32(lssc_m1); 1534 ASSIGN_OPT(lssc_d); 1535 ASSIGN_OPT_SATU32(lssc_m2); 1536 1537 ASSIGN_OPT_SATU32(ulsc_m1); 1538 ASSIGN_OPT(ulsc_d); 1539 ASSIGN_OPT_SATU32(ulsc_m2); 1540 1541 ASSIGN_OPT(flags); 1542 1543 #undef ASSIGN_OPT 1544 #undef ASSIGN_OPT_SATU32 1545 } else 1546 COPY(pq_u); 1547 1548 ASSIGN(qid); 1549 break; 1550 } 1551 case 1: { 1552 struct pf_altq_v1 *exported_q = 1553 &((struct pfioc_altq_v1 *)pa)->altq; 1554 1555 COPY(ifname); 1556 1557 ASSIGN(scheduler); 1558 ASSIGN(tbrsize); 1559 ASSIGN(ifbandwidth); 1560 1561 COPY(qname); 1562 COPY(parent); 1563 ASSIGN(parent_qid); 1564 ASSIGN(bandwidth); 1565 ASSIGN(priority); 1566 ASSIGN(local_flags); 1567 1568 ASSIGN(qlimit); 1569 ASSIGN(flags); 1570 COPY(pq_u); 1571 1572 ASSIGN(qid); 1573 break; 1574 } 1575 default: 1576 panic("%s: unhandled struct pfioc_altq version", __func__); 1577 break; 1578 } 1579 1580 #undef ASSIGN 1581 #undef COPY 1582 #undef SATU16 1583 #undef SATU32 1584 1585 return (0); 1586 } 1587 1588 /* 1589 * Handle import to struct pf_kaltq of struct pf_altq from user binaries 1590 * that may be using any version of it. 
1591 */ 1592 static int 1593 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size) 1594 { 1595 u_int32_t version; 1596 1597 if (ioc_size == sizeof(struct pfioc_altq_v0)) 1598 version = 0; 1599 else 1600 version = pa->version; 1601 1602 if (version > PFIOC_ALTQ_VERSION) 1603 return (EINVAL); 1604 1605 #define ASSIGN(x) q->x = imported_q->x 1606 #define COPY(x) \ 1607 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) 1608 1609 switch (version) { 1610 case 0: { 1611 struct pf_altq_v0 *imported_q = 1612 &((struct pfioc_altq_v0 *)pa)->altq; 1613 1614 COPY(ifname); 1615 1616 ASSIGN(scheduler); 1617 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ 1618 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ 1619 1620 COPY(qname); 1621 COPY(parent); 1622 ASSIGN(parent_qid); 1623 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ 1624 ASSIGN(priority); 1625 ASSIGN(local_flags); 1626 1627 ASSIGN(qlimit); 1628 ASSIGN(flags); 1629 1630 if (imported_q->scheduler == ALTQT_HFSC) { 1631 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x 1632 1633 /* 1634 * The m1 and m2 parameters are being copied from 1635 * 32-bit to 64-bit. 1636 */ 1637 ASSIGN_OPT(rtsc_m1); 1638 ASSIGN_OPT(rtsc_d); 1639 ASSIGN_OPT(rtsc_m2); 1640 1641 ASSIGN_OPT(lssc_m1); 1642 ASSIGN_OPT(lssc_d); 1643 ASSIGN_OPT(lssc_m2); 1644 1645 ASSIGN_OPT(ulsc_m1); 1646 ASSIGN_OPT(ulsc_d); 1647 ASSIGN_OPT(ulsc_m2); 1648 1649 ASSIGN_OPT(flags); 1650 1651 #undef ASSIGN_OPT 1652 } else 1653 COPY(pq_u); 1654 1655 ASSIGN(qid); 1656 break; 1657 } 1658 case 1: { 1659 struct pf_altq_v1 *imported_q = 1660 &((struct pfioc_altq_v1 *)pa)->altq; 1661 1662 COPY(ifname); 1663 1664 ASSIGN(scheduler); 1665 ASSIGN(tbrsize); 1666 ASSIGN(ifbandwidth); 1667 1668 COPY(qname); 1669 COPY(parent); 1670 ASSIGN(parent_qid); 1671 ASSIGN(bandwidth); 1672 ASSIGN(priority); 1673 ASSIGN(local_flags); 1674 1675 ASSIGN(qlimit); 1676 ASSIGN(flags); 1677 COPY(pq_u); 1678 1679 ASSIGN(qid); 1680 break; 1681 } 1682 default: 1683 panic("%s: unhandled struct pfioc_altq version", __func__); 1684 break; 1685 } 1686 1687 #undef ASSIGN 1688 #undef COPY 1689 1690 return (0); 1691 } 1692 1693 static struct pf_altq * 1694 pf_altq_get_nth_active(u_int32_t n) 1695 { 1696 struct pf_altq *altq; 1697 u_int32_t nr; 1698 1699 nr = 0; 1700 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 1701 if (nr == n) 1702 return (altq); 1703 nr++; 1704 } 1705 1706 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) { 1707 if (nr == n) 1708 return (altq); 1709 nr++; 1710 } 1711 1712 return (NULL); 1713 } 1714 #endif /* ALTQ */ 1715 1716 struct pf_krule * 1717 pf_krule_alloc(void) 1718 { 1719 struct pf_krule *rule; 1720 1721 rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO); 1722 mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF); 1723 return (rule); 1724 } 1725 1726 void 1727 pf_krule_free(struct pf_krule *rule) 1728 { 1729 #ifdef PF_WANT_32_TO_64_COUNTER 1730 bool wowned; 1731 #endif 1732 1733 if (rule == NULL) 1734 return; 1735 1736 #ifdef PF_WANT_32_TO_64_COUNTER 1737 if (rule->allrulelinked) { 1738 wowned = PF_RULES_WOWNED(); 1739 if (!wowned) 1740 PF_RULES_WLOCK(); 1741 LIST_REMOVE(rule, allrulelist); 1742 V_pf_allrulecount--; 1743 if (!wowned) 1744 PF_RULES_WUNLOCK(); 1745 } 1746 #endif 1747 1748 pf_counter_u64_deinit(&rule->evaluations); 1749 for (int i = 0; i < 2; i++) { 1750 pf_counter_u64_deinit(&rule->packets[i]); 1751 pf_counter_u64_deinit(&rule->bytes[i]); 1752 } 1753 counter_u64_free(rule->states_cur); 1754 
struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (rule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}

static int
pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
    struct pf_kstate_kill *kill)
{
	int ret;

	bzero(kill, sizeof(*kill));

	bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
	kill->psk_af = psk->psk_af;
	kill->psk_proto = psk->psk_proto;
	bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
	bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
	ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname,
	    sizeof(kill->psk_ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(kill->psk_label, psk->psk_label,
	    sizeof(kill->psk_label));
	if (ret != 0)
		return (ret);

	return (0);
}

static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2059 struct thread *td) 2060 { 2061 struct pf_kruleset *ruleset; 2062 struct pf_krule *tail; 2063 struct pf_kpooladdr *pa; 2064 struct pfi_kkif *kif = NULL; 2065 int rs_num; 2066 int error = 0; 2067 2068 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2069 error = EINVAL; 2070 goto errout_unlocked; 2071 } 2072 2073 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2074 2075 if (rule->ifname[0]) 2076 kif = pf_kkif_create(M_WAITOK); 2077 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2078 for (int i = 0; i < 2; i++) { 2079 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2080 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2081 } 2082 rule->states_cur = counter_u64_alloc(M_WAITOK); 2083 rule->states_tot = counter_u64_alloc(M_WAITOK); 2084 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2085 rule->cuid = td->td_ucred->cr_ruid; 2086 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2087 TAILQ_INIT(&rule->rpool.list); 2088 2089 PF_RULES_WLOCK(); 2090 #ifdef PF_WANT_32_TO_64_COUNTER 2091 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2092 MPASS(!rule->allrulelinked); 2093 rule->allrulelinked = true; 2094 V_pf_allrulecount++; 2095 #endif 2096 ruleset = pf_find_kruleset(anchor); 2097 if (ruleset == NULL) 2098 ERROUT(EINVAL); 2099 rs_num = pf_get_ruleset_number(rule->action); 2100 if (rs_num >= PF_RULESET_MAX) 2101 ERROUT(EINVAL); 2102 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2103 DPFPRINTF(PF_DEBUG_MISC, 2104 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2105 ruleset->rules[rs_num].inactive.ticket)); 2106 ERROUT(EBUSY); 2107 } 2108 if (pool_ticket != V_ticket_pabuf) { 2109 DPFPRINTF(PF_DEBUG_MISC, 2110 ("pool_ticket: %d != %d\n", pool_ticket, 2111 V_ticket_pabuf)); 2112 ERROUT(EBUSY); 2113 } 2114 2115 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2116 pf_krulequeue); 2117 if (tail) 2118 rule->nr = tail->nr + 1; 2119 else 2120 rule->nr = 0; 2121 if (rule->ifname[0]) { 2122 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2123 kif = NULL; 2124 pfi_kkif_ref(rule->kif); 2125 } else 2126 rule->kif = NULL; 2127 2128 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2129 error = EBUSY; 2130 2131 #ifdef ALTQ 2132 /* set queue IDs */ 2133 if (rule->qname[0] != 0) { 2134 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2135 error = EBUSY; 2136 else if (rule->pqname[0] != 0) { 2137 if ((rule->pqid = 2138 pf_qname2qid(rule->pqname)) == 0) 2139 error = EBUSY; 2140 } else 2141 rule->pqid = rule->qid; 2142 } 2143 #endif 2144 if (rule->tagname[0]) 2145 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2146 error = EBUSY; 2147 if (rule->match_tagname[0]) 2148 if ((rule->match_tag = 2149 pf_tagname2tag(rule->match_tagname)) == 0) 2150 error = EBUSY; 2151 if (rule->rt && !rule->direction) 2152 error = EINVAL; 2153 if (!rule->log) 2154 rule->logif = 0; 2155 if (rule->logif >= PFLOGIFS_MAX) 2156 error = EINVAL; 2157 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2158 error = ENOMEM; 2159 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2160 error = ENOMEM; 2161 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2162 error = EINVAL; 2163 if (rule->scrub_flags & PFSTATE_SETPRIO && 2164 (rule->set_prio[0] > PF_PRIO_MAX || 2165 rule->set_prio[1] > PF_PRIO_MAX)) 2166 error = EINVAL; 2167 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2168 if (pa->addr.type == PF_ADDR_TABLE) { 2169 pa->addr.p.tbl = pfr_attach_table(ruleset, 2170 pa->addr.v.tblname); 2171 if (pa->addr.p.tbl == NULL) 2172 error = ENOMEM; 2173 } 2174 2175 rule->overload_tbl = NULL; 2176 if 
(rule->overload_tblname[0]) { 2177 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2178 rule->overload_tblname)) == NULL) 2179 error = EINVAL; 2180 else 2181 rule->overload_tbl->pfrkt_flags |= 2182 PFR_TFLAG_ACTIVE; 2183 } 2184 2185 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2186 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2187 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2188 (rule->rt > PF_NOPFROUTE)) && 2189 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2190 error = EINVAL; 2191 2192 if (error) { 2193 pf_free_rule(rule); 2194 rule = NULL; 2195 ERROUT(error); 2196 } 2197 2198 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2199 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2200 rule, entries); 2201 ruleset->rules[rs_num].inactive.rcount++; 2202 2203 PF_RULES_WUNLOCK(); 2204 2205 return (0); 2206 2207 #undef ERROUT 2208 errout: 2209 PF_RULES_WUNLOCK(); 2210 errout_unlocked: 2211 pf_kkif_free(kif); 2212 pf_krule_free(rule); 2213 return (error); 2214 } 2215 2216 static bool 2217 pf_label_match(const struct pf_krule *rule, const char *label) 2218 { 2219 int i = 0; 2220 2221 while (*rule->label[i]) { 2222 if (strcmp(rule->label[i], label) == 0) 2223 return (true); 2224 i++; 2225 } 2226 2227 return (false); 2228 } 2229 2230 static unsigned int 2231 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2232 { 2233 struct pf_kstate *s; 2234 int more = 0; 2235 2236 s = pf_find_state_all(key, dir, &more); 2237 if (s == NULL) 2238 return (0); 2239 2240 if (more) { 2241 PF_STATE_UNLOCK(s); 2242 return (0); 2243 } 2244 2245 pf_unlink_state(s); 2246 return (1); 2247 } 2248 2249 static int 2250 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2251 { 2252 struct pf_kstate *s; 2253 struct pf_state_key *sk; 2254 struct pf_addr *srcaddr, *dstaddr; 2255 struct pf_state_key_cmp match_key; 2256 int idx, killed = 0; 2257 unsigned int dir; 2258 u_int16_t srcport, dstport; 2259 struct pfi_kkif *kif; 2260 2261 relock_DIOCKILLSTATES: 2262 PF_HASHROW_LOCK(ih); 2263 LIST_FOREACH(s, &ih->states, entry) { 2264 /* For floating states look at the original kif. */ 2265 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2266 2267 sk = s->key[PF_SK_WIRE]; 2268 if (s->direction == PF_OUT) { 2269 srcaddr = &sk->addr[1]; 2270 dstaddr = &sk->addr[0]; 2271 srcport = sk->port[1]; 2272 dstport = sk->port[0]; 2273 } else { 2274 srcaddr = &sk->addr[0]; 2275 dstaddr = &sk->addr[1]; 2276 srcport = sk->port[0]; 2277 dstport = sk->port[1]; 2278 } 2279 2280 if (psk->psk_af && sk->af != psk->psk_af) 2281 continue; 2282 2283 if (psk->psk_proto && psk->psk_proto != sk->proto) 2284 continue; 2285 2286 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2287 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2288 continue; 2289 2290 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2291 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2292 continue; 2293 2294 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2295 &psk->psk_rt_addr.addr.v.a.addr, 2296 &psk->psk_rt_addr.addr.v.a.mask, 2297 &s->rt_addr, sk->af)) 2298 continue; 2299 2300 if (psk->psk_src.port_op != 0 && 2301 ! pf_match_port(psk->psk_src.port_op, 2302 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2303 continue; 2304 2305 if (psk->psk_dst.port_op != 0 && 2306 ! pf_match_port(psk->psk_dst.port_op, 2307 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2308 continue; 2309 2310 if (psk->psk_label[0] && 2311 ! 
pf_label_match(s->rule.ptr, psk->psk_label)) 2312 continue; 2313 2314 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2315 kif->pfik_name)) 2316 continue; 2317 2318 if (psk->psk_kill_match) { 2319 /* Create the key to find matching states, with lock 2320 * held. */ 2321 2322 bzero(&match_key, sizeof(match_key)); 2323 2324 if (s->direction == PF_OUT) { 2325 dir = PF_IN; 2326 idx = PF_SK_STACK; 2327 } else { 2328 dir = PF_OUT; 2329 idx = PF_SK_WIRE; 2330 } 2331 2332 match_key.af = s->key[idx]->af; 2333 match_key.proto = s->key[idx]->proto; 2334 PF_ACPY(&match_key.addr[0], 2335 &s->key[idx]->addr[1], match_key.af); 2336 match_key.port[0] = s->key[idx]->port[1]; 2337 PF_ACPY(&match_key.addr[1], 2338 &s->key[idx]->addr[0], match_key.af); 2339 match_key.port[1] = s->key[idx]->port[0]; 2340 } 2341 2342 pf_unlink_state(s); 2343 killed++; 2344 2345 if (psk->psk_kill_match) 2346 killed += pf_kill_matching_state(&match_key, dir); 2347 2348 goto relock_DIOCKILLSTATES; 2349 } 2350 PF_HASHROW_UNLOCK(ih); 2351 2352 return (killed); 2353 } 2354 2355 static int 2356 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2357 { 2358 int error = 0; 2359 PF_RULES_RLOCK_TRACKER; 2360 2361 #define ERROUT_IOCTL(target, x) \ 2362 do { \ 2363 error = (x); \ 2364 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2365 goto target; \ 2366 } while (0) 2367 2368 2369 /* XXX keep in sync with switch() below */ 2370 if (securelevel_gt(td->td_ucred, 2)) 2371 switch (cmd) { 2372 case DIOCGETRULES: 2373 case DIOCGETRULE: 2374 case DIOCGETRULENV: 2375 case DIOCGETADDRS: 2376 case DIOCGETADDR: 2377 case DIOCGETSTATE: 2378 case DIOCGETSTATENV: 2379 case DIOCSETSTATUSIF: 2380 case DIOCGETSTATUS: 2381 case DIOCGETSTATUSNV: 2382 case DIOCCLRSTATUS: 2383 case DIOCNATLOOK: 2384 case DIOCSETDEBUG: 2385 case DIOCGETSTATES: 2386 case DIOCGETSTATESV2: 2387 case DIOCGETTIMEOUT: 2388 case DIOCCLRRULECTRS: 2389 case DIOCGETLIMIT: 2390 case DIOCGETALTQSV0: 2391 case DIOCGETALTQSV1: 2392 case DIOCGETALTQV0: 2393 case DIOCGETALTQV1: 2394 case DIOCGETQSTATSV0: 2395 case DIOCGETQSTATSV1: 2396 case DIOCGETRULESETS: 2397 case DIOCGETRULESET: 2398 case DIOCRGETTABLES: 2399 case DIOCRGETTSTATS: 2400 case DIOCRCLRTSTATS: 2401 case DIOCRCLRADDRS: 2402 case DIOCRADDADDRS: 2403 case DIOCRDELADDRS: 2404 case DIOCRSETADDRS: 2405 case DIOCRGETADDRS: 2406 case DIOCRGETASTATS: 2407 case DIOCRCLRASTATS: 2408 case DIOCRTSTADDRS: 2409 case DIOCOSFPGET: 2410 case DIOCGETSRCNODES: 2411 case DIOCCLRSRCNODES: 2412 case DIOCGETSYNCOOKIES: 2413 case DIOCIGETIFACES: 2414 case DIOCGIFSPEEDV0: 2415 case DIOCGIFSPEEDV1: 2416 case DIOCSETIFFLAG: 2417 case DIOCCLRIFFLAG: 2418 case DIOCGETETHRULES: 2419 case DIOCGETETHRULE: 2420 break; 2421 case DIOCRCLRTABLES: 2422 case DIOCRADDTABLES: 2423 case DIOCRDELTABLES: 2424 case DIOCRSETTFLAGS: 2425 if (((struct pfioc_table *)addr)->pfrio_flags & 2426 PFR_FLAG_DUMMY) 2427 break; /* dummy operation ok */ 2428 return (EPERM); 2429 default: 2430 return (EPERM); 2431 } 2432 2433 if (!(flags & FWRITE)) 2434 switch (cmd) { 2435 case DIOCGETRULES: 2436 case DIOCGETADDRS: 2437 case DIOCGETADDR: 2438 case DIOCGETSTATE: 2439 case DIOCGETSTATENV: 2440 case DIOCGETSTATUS: 2441 case DIOCGETSTATUSNV: 2442 case DIOCGETSTATES: 2443 case DIOCGETSTATESV2: 2444 case DIOCGETTIMEOUT: 2445 case DIOCGETLIMIT: 2446 case DIOCGETALTQSV0: 2447 case DIOCGETALTQSV1: 2448 case DIOCGETALTQV0: 2449 case DIOCGETALTQV1: 2450 case DIOCGETQSTATSV0: 2451 case DIOCGETQSTATSV1: 2452 case DIOCGETRULESETS: 2453 case 
DIOCGETRULESET: 2454 case DIOCNATLOOK: 2455 case DIOCRGETTABLES: 2456 case DIOCRGETTSTATS: 2457 case DIOCRGETADDRS: 2458 case DIOCRGETASTATS: 2459 case DIOCRTSTADDRS: 2460 case DIOCOSFPGET: 2461 case DIOCGETSRCNODES: 2462 case DIOCGETSYNCOOKIES: 2463 case DIOCIGETIFACES: 2464 case DIOCGIFSPEEDV1: 2465 case DIOCGIFSPEEDV0: 2466 case DIOCGETRULENV: 2467 case DIOCGETETHRULES: 2468 case DIOCGETETHRULE: 2469 break; 2470 case DIOCRCLRTABLES: 2471 case DIOCRADDTABLES: 2472 case DIOCRDELTABLES: 2473 case DIOCRCLRTSTATS: 2474 case DIOCRCLRADDRS: 2475 case DIOCRADDADDRS: 2476 case DIOCRDELADDRS: 2477 case DIOCRSETADDRS: 2478 case DIOCRSETTFLAGS: 2479 if (((struct pfioc_table *)addr)->pfrio_flags & 2480 PFR_FLAG_DUMMY) { 2481 flags |= FWRITE; /* need write lock for dummy */ 2482 break; /* dummy operation ok */ 2483 } 2484 return (EACCES); 2485 case DIOCGETRULE: 2486 if (((struct pfioc_rule *)addr)->action == 2487 PF_GET_CLR_CNTR) 2488 return (EACCES); 2489 break; 2490 default: 2491 return (EACCES); 2492 } 2493 2494 CURVNET_SET(TD_TO_VNET(td)); 2495 2496 switch (cmd) { 2497 case DIOCSTART: 2498 sx_xlock(&pf_ioctl_lock); 2499 if (V_pf_status.running) 2500 error = EEXIST; 2501 else { 2502 int cpu; 2503 2504 hook_pf(); 2505 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2506 hook_pf_eth(); 2507 V_pf_status.running = 1; 2508 V_pf_status.since = time_second; 2509 2510 CPU_FOREACH(cpu) 2511 V_pf_stateid[cpu] = time_second; 2512 2513 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2514 } 2515 break; 2516 2517 case DIOCSTOP: 2518 sx_xlock(&pf_ioctl_lock); 2519 if (!V_pf_status.running) 2520 error = ENOENT; 2521 else { 2522 V_pf_status.running = 0; 2523 dehook_pf(); 2524 dehook_pf_eth(); 2525 V_pf_status.since = time_second; 2526 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2527 } 2528 break; 2529 2530 case DIOCGETETHRULES: { 2531 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2532 nvlist_t *nvl; 2533 void *packed; 2534 struct pf_keth_rule *tail; 2535 struct pf_keth_ruleset *rs; 2536 u_int32_t ticket, nr; 2537 const char *anchor = ""; 2538 2539 nvl = NULL; 2540 packed = NULL; 2541 2542 #define ERROUT(x) do { error = (x); goto DIOCGETETHRULES_error; } while (0) 2543 2544 if (nv->len > pf_ioctl_maxcount) 2545 ERROUT(ENOMEM); 2546 2547 /* Copy the request in */ 2548 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2549 if (packed == NULL) 2550 ERROUT(ENOMEM); 2551 2552 error = copyin(nv->data, packed, nv->len); 2553 if (error) 2554 ERROUT(error); 2555 2556 nvl = nvlist_unpack(packed, nv->len, 0); 2557 if (nvl == NULL) 2558 ERROUT(EBADMSG); 2559 2560 if (! 
		    nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);

		anchor = nvlist_get_string(nvl, "anchor");

		rs = pf_find_keth_ruleset(anchor);

		nvlist_destroy(nvl);
		nvl = NULL;
		free(packed, M_NVLIST);
		packed = NULL;

		if (rs == NULL)
			ERROUT(ENOENT);

		/* Reply */
		nvl = nvlist_create(0);
		if (nvl == NULL)
			ERROUT(ENOMEM);

		PF_RULES_RLOCK();

		ticket = rs->active.ticket;
		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
		if (tail)
			nr = tail->nr + 1;
		else
			nr = 0;

		PF_RULES_RUNLOCK();

		nvlist_add_number(nvl, "ticket", ticket);
		nvlist_add_number(nvl, "nr", nr);

		packed = nvlist_pack(nvl, &nv->len);
		if (packed == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(packed, nv->data, nv->len);

#undef ERROUT
DIOCGETETHRULES_error:
		free(packed, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCGETETHRULE: {
		struct epoch_tracker et;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL;
		struct pf_keth_ruleset *rs;
		u_int32_t ticket, nr;
		bool clear = false;
		const char *anchor;

#define ERROUT(x) do { error = (x); goto DIOCGETETHRULE_error; } while (0)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		ticket = nvlist_get_number(nvl, "ticket");
		if (! nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		anchor = nvlist_get_string(nvl, "anchor");

		if (nvlist_exists_bool(nvl, "clear"))
			clear = nvlist_get_bool(nvl, "clear");

		if (clear && !(flags & FWRITE))
			ERROUT(EACCES);

		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);
		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_RLOCK();
		rs = pf_find_keth_ruleset(anchor);
		if (rs == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		if (ticket != rs->active.ticket) {
			PF_RULES_RUNLOCK();
			ERROUT(EBUSY);
		}

		nvlist_destroy(nvl);
		nvl = NULL;
		free(nvlpacked, M_TEMP);
		nvlpacked = NULL;

		rule = TAILQ_FIRST(rs->active.rules);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		/* Make sure rule can't go away. */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_TEMP);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x) do { error = (x); goto DIOCADDETHRULE_error; } while (0)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0) {
			free(rule, M_PFRULE);
			ERROUT(error);
		}

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;

		if (error) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
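		/*
		 * Ethernet rules follow the same two-phase protocol as the
		 * packet-filter rulesets: the rule is staged on the inactive
		 * list under the caller's ticket and only becomes visible to
		 * the dataplane once the transaction is committed.
		 */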
TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2811 2812 PF_RULES_WUNLOCK(); 2813 2814 #undef ERROUT 2815 DIOCADDETHRULE_error: 2816 nvlist_destroy(nvl); 2817 free(nvlpacked, M_TEMP); 2818 break; 2819 } 2820 2821 case DIOCADDRULENV: { 2822 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2823 nvlist_t *nvl = NULL; 2824 void *nvlpacked = NULL; 2825 struct pf_krule *rule = NULL; 2826 const char *anchor = "", *anchor_call = ""; 2827 uint32_t ticket = 0, pool_ticket = 0; 2828 2829 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 2830 2831 if (nv->len > pf_ioctl_maxcount) 2832 ERROUT(ENOMEM); 2833 2834 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 2835 error = copyin(nv->data, nvlpacked, nv->len); 2836 if (error) 2837 ERROUT(error); 2838 2839 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2840 if (nvl == NULL) 2841 ERROUT(EBADMSG); 2842 2843 if (! nvlist_exists_number(nvl, "ticket")) 2844 ERROUT(EINVAL); 2845 ticket = nvlist_get_number(nvl, "ticket"); 2846 2847 if (! nvlist_exists_number(nvl, "pool_ticket")) 2848 ERROUT(EINVAL); 2849 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 2850 2851 if (! nvlist_exists_nvlist(nvl, "rule")) 2852 ERROUT(EINVAL); 2853 2854 rule = pf_krule_alloc(); 2855 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 2856 rule); 2857 if (error) 2858 ERROUT(error); 2859 2860 if (nvlist_exists_string(nvl, "anchor")) 2861 anchor = nvlist_get_string(nvl, "anchor"); 2862 if (nvlist_exists_string(nvl, "anchor_call")) 2863 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2864 2865 if ((error = nvlist_error(nvl))) 2866 ERROUT(error); 2867 2868 /* Frees rule on error */ 2869 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 2870 anchor_call, td); 2871 2872 nvlist_destroy(nvl); 2873 free(nvlpacked, M_TEMP); 2874 break; 2875 #undef ERROUT 2876 DIOCADDRULENV_error: 2877 pf_krule_free(rule); 2878 nvlist_destroy(nvl); 2879 free(nvlpacked, M_TEMP); 2880 2881 break; 2882 } 2883 case DIOCADDRULE: { 2884 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2885 struct pf_krule *rule; 2886 2887 rule = pf_krule_alloc(); 2888 error = pf_rule_to_krule(&pr->rule, rule); 2889 if (error != 0) { 2890 pf_krule_free(rule); 2891 break; 2892 } 2893 2894 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2895 2896 /* Frees rule on error */ 2897 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 2898 pr->anchor, pr->anchor_call, td); 2899 break; 2900 } 2901 2902 case DIOCGETRULES: { 2903 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2904 struct pf_kruleset *ruleset; 2905 struct pf_krule *tail; 2906 int rs_num; 2907 2908 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2909 2910 PF_RULES_WLOCK(); 2911 ruleset = pf_find_kruleset(pr->anchor); 2912 if (ruleset == NULL) { 2913 PF_RULES_WUNLOCK(); 2914 error = EINVAL; 2915 break; 2916 } 2917 rs_num = pf_get_ruleset_number(pr->rule.action); 2918 if (rs_num >= PF_RULESET_MAX) { 2919 PF_RULES_WUNLOCK(); 2920 error = EINVAL; 2921 break; 2922 } 2923 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 2924 pf_krulequeue); 2925 if (tail) 2926 pr->nr = tail->nr + 1; 2927 else 2928 pr->nr = 0; 2929 pr->ticket = ruleset->rules[rs_num].active.ticket; 2930 PF_RULES_WUNLOCK(); 2931 break; 2932 } 2933 2934 case DIOCGETRULE: { 2935 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2936 struct pf_kruleset *ruleset; 2937 struct pf_krule *rule; 2938 int rs_num; 2939 2940 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2941 2942 PF_RULES_WLOCK(); 2943 ruleset = pf_find_kruleset(pr->anchor); 2944 if (ruleset == NULL) { 2945 PF_RULES_WUNLOCK(); 2946 
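			/* No ruleset exists at this anchor path. */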
error = EINVAL; 2947 break; 2948 } 2949 rs_num = pf_get_ruleset_number(pr->rule.action); 2950 if (rs_num >= PF_RULESET_MAX) { 2951 PF_RULES_WUNLOCK(); 2952 error = EINVAL; 2953 break; 2954 } 2955 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 2956 PF_RULES_WUNLOCK(); 2957 error = EBUSY; 2958 break; 2959 } 2960 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 2961 while ((rule != NULL) && (rule->nr != pr->nr)) 2962 rule = TAILQ_NEXT(rule, entries); 2963 if (rule == NULL) { 2964 PF_RULES_WUNLOCK(); 2965 error = EBUSY; 2966 break; 2967 } 2968 2969 pf_krule_to_rule(rule, &pr->rule); 2970 2971 if (pf_kanchor_copyout(ruleset, rule, pr)) { 2972 PF_RULES_WUNLOCK(); 2973 error = EBUSY; 2974 break; 2975 } 2976 pf_addr_copyout(&pr->rule.src.addr); 2977 pf_addr_copyout(&pr->rule.dst.addr); 2978 2979 if (pr->action == PF_GET_CLR_CNTR) { 2980 pf_counter_u64_zero(&rule->evaluations); 2981 for (int i = 0; i < 2; i++) { 2982 pf_counter_u64_zero(&rule->packets[i]); 2983 pf_counter_u64_zero(&rule->bytes[i]); 2984 } 2985 counter_u64_zero(rule->states_tot); 2986 } 2987 PF_RULES_WUNLOCK(); 2988 break; 2989 } 2990 2991 case DIOCGETRULENV: { 2992 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2993 nvlist_t *nvrule = NULL; 2994 nvlist_t *nvl = NULL; 2995 struct pf_kruleset *ruleset; 2996 struct pf_krule *rule; 2997 void *nvlpacked = NULL; 2998 int rs_num, nr; 2999 bool clear_counter = false; 3000 3001 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3002 3003 if (nv->len > pf_ioctl_maxcount) 3004 ERROUT(ENOMEM); 3005 3006 /* Copy the request in */ 3007 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3008 if (nvlpacked == NULL) 3009 ERROUT(ENOMEM); 3010 3011 error = copyin(nv->data, nvlpacked, nv->len); 3012 if (error) 3013 ERROUT(error); 3014 3015 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3016 if (nvl == NULL) 3017 ERROUT(EBADMSG); 3018 3019 if (! nvlist_exists_string(nvl, "anchor")) 3020 ERROUT(EBADMSG); 3021 if (! nvlist_exists_number(nvl, "ruleset")) 3022 ERROUT(EBADMSG); 3023 if (! nvlist_exists_number(nvl, "ticket")) 3024 ERROUT(EBADMSG); 3025 if (! 
nvlist_exists_number(nvl, "nr")) 3026 ERROUT(EBADMSG); 3027 3028 if (nvlist_exists_bool(nvl, "clear_counter")) 3029 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3030 3031 if (clear_counter && !(flags & FWRITE)) 3032 ERROUT(EACCES); 3033 3034 nr = nvlist_get_number(nvl, "nr"); 3035 3036 PF_RULES_WLOCK(); 3037 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3038 if (ruleset == NULL) { 3039 PF_RULES_WUNLOCK(); 3040 ERROUT(ENOENT); 3041 } 3042 3043 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3044 if (rs_num >= PF_RULESET_MAX) { 3045 PF_RULES_WUNLOCK(); 3046 ERROUT(EINVAL); 3047 } 3048 3049 if (nvlist_get_number(nvl, "ticket") != 3050 ruleset->rules[rs_num].active.ticket) { 3051 PF_RULES_WUNLOCK(); 3052 ERROUT(EBUSY); 3053 } 3054 3055 if ((error = nvlist_error(nvl))) { 3056 PF_RULES_WUNLOCK(); 3057 ERROUT(error); 3058 } 3059 3060 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3061 while ((rule != NULL) && (rule->nr != nr)) 3062 rule = TAILQ_NEXT(rule, entries); 3063 if (rule == NULL) { 3064 PF_RULES_WUNLOCK(); 3065 ERROUT(EBUSY); 3066 } 3067 3068 nvrule = pf_krule_to_nvrule(rule); 3069 3070 nvlist_destroy(nvl); 3071 nvl = nvlist_create(0); 3072 if (nvl == NULL) { 3073 PF_RULES_WUNLOCK(); 3074 ERROUT(ENOMEM); 3075 } 3076 nvlist_add_number(nvl, "nr", nr); 3077 nvlist_add_nvlist(nvl, "rule", nvrule); 3078 nvlist_destroy(nvrule); 3079 nvrule = NULL; 3080 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3081 PF_RULES_WUNLOCK(); 3082 ERROUT(EBUSY); 3083 } 3084 3085 free(nvlpacked, M_NVLIST); 3086 nvlpacked = nvlist_pack(nvl, &nv->len); 3087 if (nvlpacked == NULL) { 3088 PF_RULES_WUNLOCK(); 3089 ERROUT(ENOMEM); 3090 } 3091 3092 if (nv->size == 0) { 3093 PF_RULES_WUNLOCK(); 3094 ERROUT(0); 3095 } 3096 else if (nv->size < nv->len) { 3097 PF_RULES_WUNLOCK(); 3098 ERROUT(ENOSPC); 3099 } 3100 3101 if (clear_counter) { 3102 pf_counter_u64_zero(&rule->evaluations); 3103 for (int i = 0; i < 2; i++) { 3104 pf_counter_u64_zero(&rule->packets[i]); 3105 pf_counter_u64_zero(&rule->bytes[i]); 3106 } 3107 counter_u64_zero(rule->states_tot); 3108 } 3109 PF_RULES_WUNLOCK(); 3110 3111 error = copyout(nvlpacked, nv->data, nv->len); 3112 3113 #undef ERROUT 3114 DIOCGETRULENV_error: 3115 free(nvlpacked, M_NVLIST); 3116 nvlist_destroy(nvrule); 3117 nvlist_destroy(nvl); 3118 3119 break; 3120 } 3121 3122 case DIOCCHANGERULE: { 3123 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3124 struct pf_kruleset *ruleset; 3125 struct pf_krule *oldrule = NULL, *newrule = NULL; 3126 struct pfi_kkif *kif = NULL; 3127 struct pf_kpooladdr *pa; 3128 u_int32_t nr = 0; 3129 int rs_num; 3130 3131 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3132 3133 if (pcr->action < PF_CHANGE_ADD_HEAD || 3134 pcr->action > PF_CHANGE_GET_TICKET) { 3135 error = EINVAL; 3136 break; 3137 } 3138 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3139 error = EINVAL; 3140 break; 3141 } 3142 3143 if (pcr->action != PF_CHANGE_REMOVE) { 3144 newrule = pf_krule_alloc(); 3145 error = pf_rule_to_krule(&pcr->rule, newrule); 3146 if (error != 0) { 3147 free(newrule, M_PFRULE); 3148 break; 3149 } 3150 3151 if (newrule->ifname[0]) 3152 kif = pf_kkif_create(M_WAITOK); 3153 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3154 for (int i = 0; i < 2; i++) { 3155 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3156 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3157 } 3158 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3159 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3160 newrule->src_nodes = 
counter_u64_alloc(M_WAITOK); 3161 newrule->cuid = td->td_ucred->cr_ruid; 3162 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3163 TAILQ_INIT(&newrule->rpool.list); 3164 } 3165 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; } 3166 3167 PF_RULES_WLOCK(); 3168 #ifdef PF_WANT_32_TO_64_COUNTER 3169 if (newrule != NULL) { 3170 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3171 newrule->allrulelinked = true; 3172 V_pf_allrulecount++; 3173 } 3174 #endif 3175 3176 if (!(pcr->action == PF_CHANGE_REMOVE || 3177 pcr->action == PF_CHANGE_GET_TICKET) && 3178 pcr->pool_ticket != V_ticket_pabuf) 3179 ERROUT(EBUSY); 3180 3181 ruleset = pf_find_kruleset(pcr->anchor); 3182 if (ruleset == NULL) 3183 ERROUT(EINVAL); 3184 3185 rs_num = pf_get_ruleset_number(pcr->rule.action); 3186 if (rs_num >= PF_RULESET_MAX) 3187 ERROUT(EINVAL); 3188 3189 if (pcr->action == PF_CHANGE_GET_TICKET) { 3190 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3191 ERROUT(0); 3192 } else if (pcr->ticket != 3193 ruleset->rules[rs_num].active.ticket) 3194 ERROUT(EINVAL); 3195 3196 if (pcr->action != PF_CHANGE_REMOVE) { 3197 if (newrule->ifname[0]) { 3198 newrule->kif = pfi_kkif_attach(kif, 3199 newrule->ifname); 3200 kif = NULL; 3201 pfi_kkif_ref(newrule->kif); 3202 } else 3203 newrule->kif = NULL; 3204 3205 if (newrule->rtableid > 0 && 3206 newrule->rtableid >= rt_numfibs) 3207 error = EBUSY; 3208 3209 #ifdef ALTQ 3210 /* set queue IDs */ 3211 if (newrule->qname[0] != 0) { 3212 if ((newrule->qid = 3213 pf_qname2qid(newrule->qname)) == 0) 3214 error = EBUSY; 3215 else if (newrule->pqname[0] != 0) { 3216 if ((newrule->pqid = 3217 pf_qname2qid(newrule->pqname)) == 0) 3218 error = EBUSY; 3219 } else 3220 newrule->pqid = newrule->qid; 3221 } 3222 #endif /* ALTQ */ 3223 if (newrule->tagname[0]) 3224 if ((newrule->tag = 3225 pf_tagname2tag(newrule->tagname)) == 0) 3226 error = EBUSY; 3227 if (newrule->match_tagname[0]) 3228 if ((newrule->match_tag = pf_tagname2tag( 3229 newrule->match_tagname)) == 0) 3230 error = EBUSY; 3231 if (newrule->rt && !newrule->direction) 3232 error = EINVAL; 3233 if (!newrule->log) 3234 newrule->logif = 0; 3235 if (newrule->logif >= PFLOGIFS_MAX) 3236 error = EINVAL; 3237 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3238 error = ENOMEM; 3239 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3240 error = ENOMEM; 3241 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3242 error = EINVAL; 3243 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3244 if (pa->addr.type == PF_ADDR_TABLE) { 3245 pa->addr.p.tbl = 3246 pfr_attach_table(ruleset, 3247 pa->addr.v.tblname); 3248 if (pa->addr.p.tbl == NULL) 3249 error = ENOMEM; 3250 } 3251 3252 newrule->overload_tbl = NULL; 3253 if (newrule->overload_tblname[0]) { 3254 if ((newrule->overload_tbl = pfr_attach_table( 3255 ruleset, newrule->overload_tblname)) == 3256 NULL) 3257 error = EINVAL; 3258 else 3259 newrule->overload_tbl->pfrkt_flags |= 3260 PFR_TFLAG_ACTIVE; 3261 } 3262 3263 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3264 if (((((newrule->action == PF_NAT) || 3265 (newrule->action == PF_RDR) || 3266 (newrule->action == PF_BINAT) || 3267 (newrule->rt > PF_NOPFROUTE)) && 3268 !newrule->anchor)) && 3269 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3270 error = EINVAL; 3271 3272 if (error) { 3273 pf_free_rule(newrule); 3274 PF_RULES_WUNLOCK(); 3275 break; 3276 } 3277 3278 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3279 } 3280 pf_empty_kpool(&V_pf_pabuf); 3281 3282 if (pcr->action == 
PF_CHANGE_ADD_HEAD) 3283 oldrule = TAILQ_FIRST( 3284 ruleset->rules[rs_num].active.ptr); 3285 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3286 oldrule = TAILQ_LAST( 3287 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3288 else { 3289 oldrule = TAILQ_FIRST( 3290 ruleset->rules[rs_num].active.ptr); 3291 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3292 oldrule = TAILQ_NEXT(oldrule, entries); 3293 if (oldrule == NULL) { 3294 if (newrule != NULL) 3295 pf_free_rule(newrule); 3296 PF_RULES_WUNLOCK(); 3297 error = EINVAL; 3298 break; 3299 } 3300 } 3301 3302 if (pcr->action == PF_CHANGE_REMOVE) { 3303 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3304 oldrule); 3305 ruleset->rules[rs_num].active.rcount--; 3306 } else { 3307 if (oldrule == NULL) 3308 TAILQ_INSERT_TAIL( 3309 ruleset->rules[rs_num].active.ptr, 3310 newrule, entries); 3311 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3312 pcr->action == PF_CHANGE_ADD_BEFORE) 3313 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3314 else 3315 TAILQ_INSERT_AFTER( 3316 ruleset->rules[rs_num].active.ptr, 3317 oldrule, newrule, entries); 3318 ruleset->rules[rs_num].active.rcount++; 3319 } 3320 3321 nr = 0; 3322 TAILQ_FOREACH(oldrule, 3323 ruleset->rules[rs_num].active.ptr, entries) 3324 oldrule->nr = nr++; 3325 3326 ruleset->rules[rs_num].active.ticket++; 3327 3328 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3329 pf_remove_if_empty_kruleset(ruleset); 3330 3331 PF_RULES_WUNLOCK(); 3332 break; 3333 3334 #undef ERROUT 3335 DIOCCHANGERULE_error: 3336 PF_RULES_WUNLOCK(); 3337 pf_krule_free(newrule); 3338 pf_kkif_free(kif); 3339 break; 3340 } 3341 3342 case DIOCCLRSTATES: { 3343 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3344 struct pf_kstate_kill kill; 3345 3346 error = pf_state_kill_to_kstate_kill(psk, &kill); 3347 if (error) 3348 break; 3349 3350 psk->psk_killed = pf_clear_states(&kill); 3351 break; 3352 } 3353 3354 case DIOCCLRSTATESNV: { 3355 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3356 break; 3357 } 3358 3359 case DIOCKILLSTATES: { 3360 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3361 struct pf_kstate_kill kill; 3362 3363 error = pf_state_kill_to_kstate_kill(psk, &kill); 3364 if (error) 3365 break; 3366 3367 psk->psk_killed = 0; 3368 pf_killstates(&kill, &psk->psk_killed); 3369 break; 3370 } 3371 3372 case DIOCKILLSTATESNV: { 3373 error = pf_killstates_nv((struct pfioc_nv *)addr); 3374 break; 3375 } 3376 3377 case DIOCADDSTATE: { 3378 struct pfioc_state *ps = (struct pfioc_state *)addr; 3379 struct pfsync_state *sp = &ps->state; 3380 3381 if (sp->timeout >= PFTM_MAX) { 3382 error = EINVAL; 3383 break; 3384 } 3385 if (V_pfsync_state_import_ptr != NULL) { 3386 PF_RULES_RLOCK(); 3387 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3388 PF_RULES_RUNLOCK(); 3389 } else 3390 error = EOPNOTSUPP; 3391 break; 3392 } 3393 3394 case DIOCGETSTATE: { 3395 struct pfioc_state *ps = (struct pfioc_state *)addr; 3396 struct pf_kstate *s; 3397 3398 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3399 if (s == NULL) { 3400 error = ENOENT; 3401 break; 3402 } 3403 3404 pfsync_state_export(&ps->state, s); 3405 PF_STATE_UNLOCK(s); 3406 break; 3407 } 3408 3409 case DIOCGETSTATENV: { 3410 error = pf_getstate((struct pfioc_nv *)addr); 3411 break; 3412 } 3413 3414 case DIOCGETSTATES: { 3415 struct pfioc_states *ps = (struct pfioc_states *)addr; 3416 struct pf_kstate *s; 3417 struct pfsync_state *pstore, *p; 3418 int i, nr; 3419 size_t slice_count = 16, count; 3420 void *out; 3421 3422 
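		/*
		 * Export runs in slices: a zero-length request is a size
		 * probe and only reports the space needed; otherwise each
		 * id-hash row is counted and copied out under its row lock,
		 * regrowing the slice buffer and revisiting the row when it
		 * holds more states than the buffer fits.
		 */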
if (ps->ps_len <= 0) { 3423 nr = uma_zone_get_cur(V_pf_state_z); 3424 ps->ps_len = sizeof(struct pfsync_state) * nr; 3425 break; 3426 } 3427 3428 out = ps->ps_states; 3429 pstore = mallocarray(slice_count, 3430 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3431 nr = 0; 3432 3433 for (i = 0; i <= pf_hashmask; i++) { 3434 struct pf_idhash *ih = &V_pf_idhash[i]; 3435 3436 DIOCGETSTATES_retry: 3437 p = pstore; 3438 3439 if (LIST_EMPTY(&ih->states)) 3440 continue; 3441 3442 PF_HASHROW_LOCK(ih); 3443 count = 0; 3444 LIST_FOREACH(s, &ih->states, entry) { 3445 if (s->timeout == PFTM_UNLINKED) 3446 continue; 3447 count++; 3448 } 3449 3450 if (count > slice_count) { 3451 PF_HASHROW_UNLOCK(ih); 3452 free(pstore, M_TEMP); 3453 slice_count = count * 2; 3454 pstore = mallocarray(slice_count, 3455 sizeof(struct pfsync_state), M_TEMP, 3456 M_WAITOK | M_ZERO); 3457 goto DIOCGETSTATES_retry; 3458 } 3459 3460 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3461 PF_HASHROW_UNLOCK(ih); 3462 goto DIOCGETSTATES_full; 3463 } 3464 3465 LIST_FOREACH(s, &ih->states, entry) { 3466 if (s->timeout == PFTM_UNLINKED) 3467 continue; 3468 3469 pfsync_state_export(p, s); 3470 p++; 3471 nr++; 3472 } 3473 PF_HASHROW_UNLOCK(ih); 3474 error = copyout(pstore, out, 3475 sizeof(struct pfsync_state) * count); 3476 if (error) 3477 break; 3478 out = ps->ps_states + nr; 3479 } 3480 DIOCGETSTATES_full: 3481 ps->ps_len = sizeof(struct pfsync_state) * nr; 3482 free(pstore, M_TEMP); 3483 3484 break; 3485 } 3486 3487 case DIOCGETSTATESV2: { 3488 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3489 struct pf_kstate *s; 3490 struct pf_state_export *pstore, *p; 3491 int i, nr; 3492 size_t slice_count = 16, count; 3493 void *out; 3494 3495 if (ps->ps_req_version > PF_STATE_VERSION) { 3496 error = ENOTSUP; 3497 break; 3498 } 3499 3500 if (ps->ps_len <= 0) { 3501 nr = uma_zone_get_cur(V_pf_state_z); 3502 ps->ps_len = sizeof(struct pf_state_export) * nr; 3503 break; 3504 } 3505 3506 out = ps->ps_states; 3507 pstore = mallocarray(slice_count, 3508 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3509 nr = 0; 3510 3511 for (i = 0; i <= pf_hashmask; i++) { 3512 struct pf_idhash *ih = &V_pf_idhash[i]; 3513 3514 DIOCGETSTATESV2_retry: 3515 p = pstore; 3516 3517 if (LIST_EMPTY(&ih->states)) 3518 continue; 3519 3520 PF_HASHROW_LOCK(ih); 3521 count = 0; 3522 LIST_FOREACH(s, &ih->states, entry) { 3523 if (s->timeout == PFTM_UNLINKED) 3524 continue; 3525 count++; 3526 } 3527 3528 if (count > slice_count) { 3529 PF_HASHROW_UNLOCK(ih); 3530 free(pstore, M_TEMP); 3531 slice_count = count * 2; 3532 pstore = mallocarray(slice_count, 3533 sizeof(struct pf_state_export), M_TEMP, 3534 M_WAITOK | M_ZERO); 3535 goto DIOCGETSTATESV2_retry; 3536 } 3537 3538 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3539 PF_HASHROW_UNLOCK(ih); 3540 goto DIOCGETSTATESV2_full; 3541 } 3542 3543 LIST_FOREACH(s, &ih->states, entry) { 3544 if (s->timeout == PFTM_UNLINKED) 3545 continue; 3546 3547 pf_state_export(p, s); 3548 p++; 3549 nr++; 3550 } 3551 PF_HASHROW_UNLOCK(ih); 3552 error = copyout(pstore, out, 3553 sizeof(struct pf_state_export) * count); 3554 if (error) 3555 break; 3556 out = ps->ps_states + nr; 3557 } 3558 DIOCGETSTATESV2_full: 3559 ps->ps_len = nr * sizeof(struct pf_state_export); 3560 free(pstore, M_TEMP); 3561 3562 break; 3563 } 3564 3565 case DIOCGETSTATUS: { 3566 struct pf_status *s = (struct pf_status *)addr; 3567 3568 PF_RULES_RLOCK(); 3569 s->running = V_pf_status.running; 3570 s->since = V_pf_status.since; 3571 s->debug = 
V_pf_status.debug; 3572 s->hostid = V_pf_status.hostid; 3573 s->states = V_pf_status.states; 3574 s->src_nodes = V_pf_status.src_nodes; 3575 3576 for (int i = 0; i < PFRES_MAX; i++) 3577 s->counters[i] = 3578 counter_u64_fetch(V_pf_status.counters[i]); 3579 for (int i = 0; i < LCNT_MAX; i++) 3580 s->lcounters[i] = 3581 counter_u64_fetch(V_pf_status.lcounters[i]); 3582 for (int i = 0; i < FCNT_MAX; i++) 3583 s->fcounters[i] = 3584 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3585 for (int i = 0; i < SCNT_MAX; i++) 3586 s->scounters[i] = 3587 counter_u64_fetch(V_pf_status.scounters[i]); 3588 3589 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3590 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3591 PF_MD5_DIGEST_LENGTH); 3592 3593 pfi_update_status(s->ifname, s); 3594 PF_RULES_RUNLOCK(); 3595 break; 3596 } 3597 3598 case DIOCGETSTATUSNV: { 3599 error = pf_getstatus((struct pfioc_nv *)addr); 3600 break; 3601 } 3602 3603 case DIOCSETSTATUSIF: { 3604 struct pfioc_if *pi = (struct pfioc_if *)addr; 3605 3606 if (pi->ifname[0] == 0) { 3607 bzero(V_pf_status.ifname, IFNAMSIZ); 3608 break; 3609 } 3610 PF_RULES_WLOCK(); 3611 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3612 PF_RULES_WUNLOCK(); 3613 break; 3614 } 3615 3616 case DIOCCLRSTATUS: { 3617 PF_RULES_WLOCK(); 3618 for (int i = 0; i < PFRES_MAX; i++) 3619 counter_u64_zero(V_pf_status.counters[i]); 3620 for (int i = 0; i < FCNT_MAX; i++) 3621 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3622 for (int i = 0; i < SCNT_MAX; i++) 3623 counter_u64_zero(V_pf_status.scounters[i]); 3624 for (int i = 0; i < KLCNT_MAX; i++) 3625 counter_u64_zero(V_pf_status.lcounters[i]); 3626 V_pf_status.since = time_second; 3627 if (*V_pf_status.ifname) 3628 pfi_update_status(V_pf_status.ifname, NULL); 3629 PF_RULES_WUNLOCK(); 3630 break; 3631 } 3632 3633 case DIOCNATLOOK: { 3634 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3635 struct pf_state_key *sk; 3636 struct pf_kstate *state; 3637 struct pf_state_key_cmp key; 3638 int m = 0, direction = pnl->direction; 3639 int sidx, didx; 3640 3641 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3642 sidx = (direction == PF_IN) ? 1 : 0; 3643 didx = (direction == PF_IN) ? 
0 : 1; 3644 3645 if (!pnl->proto || 3646 PF_AZERO(&pnl->saddr, pnl->af) || 3647 PF_AZERO(&pnl->daddr, pnl->af) || 3648 ((pnl->proto == IPPROTO_TCP || 3649 pnl->proto == IPPROTO_UDP) && 3650 (!pnl->dport || !pnl->sport))) 3651 error = EINVAL; 3652 else { 3653 bzero(&key, sizeof(key)); 3654 key.af = pnl->af; 3655 key.proto = pnl->proto; 3656 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3657 key.port[sidx] = pnl->sport; 3658 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3659 key.port[didx] = pnl->dport; 3660 3661 state = pf_find_state_all(&key, direction, &m); 3662 if (state == NULL) { 3663 error = ENOENT; 3664 } else { 3665 if (m > 1) { 3666 PF_STATE_UNLOCK(state); 3667 error = E2BIG; /* more than one state */ 3668 } else { 3669 sk = state->key[sidx]; 3670 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3671 pnl->rsport = sk->port[sidx]; 3672 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3673 pnl->rdport = sk->port[didx]; 3674 PF_STATE_UNLOCK(state); 3675 } 3676 } 3677 } 3678 break; 3679 } 3680 3681 case DIOCSETTIMEOUT: { 3682 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3683 int old; 3684 3685 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3686 pt->seconds < 0) { 3687 error = EINVAL; 3688 break; 3689 } 3690 PF_RULES_WLOCK(); 3691 old = V_pf_default_rule.timeout[pt->timeout]; 3692 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3693 pt->seconds = 1; 3694 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3695 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3696 wakeup(pf_purge_thread); 3697 pt->seconds = old; 3698 PF_RULES_WUNLOCK(); 3699 break; 3700 } 3701 3702 case DIOCGETTIMEOUT: { 3703 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3704 3705 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3706 error = EINVAL; 3707 break; 3708 } 3709 PF_RULES_RLOCK(); 3710 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3711 PF_RULES_RUNLOCK(); 3712 break; 3713 } 3714 3715 case DIOCGETLIMIT: { 3716 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3717 3718 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3719 error = EINVAL; 3720 break; 3721 } 3722 PF_RULES_RLOCK(); 3723 pl->limit = V_pf_limits[pl->index].limit; 3724 PF_RULES_RUNLOCK(); 3725 break; 3726 } 3727 3728 case DIOCSETLIMIT: { 3729 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3730 int old_limit; 3731 3732 PF_RULES_WLOCK(); 3733 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3734 V_pf_limits[pl->index].zone == NULL) { 3735 PF_RULES_WUNLOCK(); 3736 error = EINVAL; 3737 break; 3738 } 3739 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 3740 old_limit = V_pf_limits[pl->index].limit; 3741 V_pf_limits[pl->index].limit = pl->limit; 3742 pl->limit = old_limit; 3743 PF_RULES_WUNLOCK(); 3744 break; 3745 } 3746 3747 case DIOCSETDEBUG: { 3748 u_int32_t *level = (u_int32_t *)addr; 3749 3750 PF_RULES_WLOCK(); 3751 V_pf_status.debug = *level; 3752 PF_RULES_WUNLOCK(); 3753 break; 3754 } 3755 3756 case DIOCCLRRULECTRS: { 3757 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 3758 struct pf_kruleset *ruleset = &pf_main_ruleset; 3759 struct pf_krule *rule; 3760 3761 PF_RULES_WLOCK(); 3762 TAILQ_FOREACH(rule, 3763 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 3764 pf_counter_u64_zero(&rule->evaluations); 3765 for (int i = 0; i < 2; i++) { 3766 pf_counter_u64_zero(&rule->packets[i]); 3767 pf_counter_u64_zero(&rule->bytes[i]); 3768 } 3769 } 3770 PF_RULES_WUNLOCK(); 3771 break; 3772 } 3773 3774 case DIOCGIFSPEEDV0: 3775 case DIOCGIFSPEEDV1: { 3776 struct pf_ifspeed_v1 *psp = (struct 
		    pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1 ps;
		struct ifnet *ifp;

		if (psp->ifname[0] == '\0') {
			error = EINVAL;
			break;
		}

		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
		if (error != 0)
			break;
		ifp = ifunit(ps.ifname);
		if (ifp != NULL) {
			psp->baudrate32 =
			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
			if (cmd == DIOCGIFSPEEDV1)
				psp->baudrate = ifp->if_baudrate;
		} else {
			error = EINVAL;
		}
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq, *a;
		struct ifnet *ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error) {
			free(altq, M_PFALTQ);
			break;
		}
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQV0:
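	/*
	 * Both versions share one handler; IOCPARM_LEN(cmd) tells
	 * pf_export_kaltq() which struct layout the caller expects.
	 */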
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq *altq;
		int nbytes;
		u_int32_t version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr *pa;
		struct pfi_kkif *kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (error != 0) {
			free(pa, M_PFRULE);
			break;
		}
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *pa;
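		/*
		 * First half of the pool enumeration handshake: report only
		 * the entry count in pp->nr; userland then fetches entries
		 * one index at a time with DIOCGETADDR.
		 */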
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4062 pp->nr = 0; 4063 4064 PF_RULES_RLOCK(); 4065 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4066 pp->r_num, 0, 1, 0); 4067 if (pool == NULL) { 4068 PF_RULES_RUNLOCK(); 4069 error = EBUSY; 4070 break; 4071 } 4072 TAILQ_FOREACH(pa, &pool->list, entries) 4073 pp->nr++; 4074 PF_RULES_RUNLOCK(); 4075 break; 4076 } 4077 4078 case DIOCGETADDR: { 4079 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4080 struct pf_kpool *pool; 4081 struct pf_kpooladdr *pa; 4082 u_int32_t nr = 0; 4083 4084 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4085 4086 PF_RULES_RLOCK(); 4087 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4088 pp->r_num, 0, 1, 1); 4089 if (pool == NULL) { 4090 PF_RULES_RUNLOCK(); 4091 error = EBUSY; 4092 break; 4093 } 4094 pa = TAILQ_FIRST(&pool->list); 4095 while ((pa != NULL) && (nr < pp->nr)) { 4096 pa = TAILQ_NEXT(pa, entries); 4097 nr++; 4098 } 4099 if (pa == NULL) { 4100 PF_RULES_RUNLOCK(); 4101 error = EBUSY; 4102 break; 4103 } 4104 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4105 pf_addr_copyout(&pp->addr.addr); 4106 PF_RULES_RUNLOCK(); 4107 break; 4108 } 4109 4110 case DIOCCHANGEADDR: { 4111 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4112 struct pf_kpool *pool; 4113 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4114 struct pf_kruleset *ruleset; 4115 struct pfi_kkif *kif = NULL; 4116 4117 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4118 4119 if (pca->action < PF_CHANGE_ADD_HEAD || 4120 pca->action > PF_CHANGE_REMOVE) { 4121 error = EINVAL; 4122 break; 4123 } 4124 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4125 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4126 pca->addr.addr.type != PF_ADDR_TABLE) { 4127 error = EINVAL; 4128 break; 4129 } 4130 if (pca->addr.addr.p.dyn != NULL) { 4131 error = EINVAL; 4132 break; 4133 } 4134 4135 if (pca->action != PF_CHANGE_REMOVE) { 4136 #ifndef INET 4137 if (pca->af == AF_INET) { 4138 error = EAFNOSUPPORT; 4139 break; 4140 } 4141 #endif /* INET */ 4142 #ifndef INET6 4143 if (pca->af == AF_INET6) { 4144 error = EAFNOSUPPORT; 4145 break; 4146 } 4147 #endif /* INET6 */ 4148 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4149 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4150 if (newpa->ifname[0]) 4151 kif = pf_kkif_create(M_WAITOK); 4152 newpa->kif = NULL; 4153 } 4154 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4155 PF_RULES_WLOCK(); 4156 ruleset = pf_find_kruleset(pca->anchor); 4157 if (ruleset == NULL) 4158 ERROUT(EBUSY); 4159 4160 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4161 pca->r_num, pca->r_last, 1, 1); 4162 if (pool == NULL) 4163 ERROUT(EBUSY); 4164 4165 if (pca->action != PF_CHANGE_REMOVE) { 4166 if (newpa->ifname[0]) { 4167 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4168 pfi_kkif_ref(newpa->kif); 4169 kif = NULL; 4170 } 4171 4172 switch (newpa->addr.type) { 4173 case PF_ADDR_DYNIFTL: 4174 error = pfi_dynaddr_setup(&newpa->addr, 4175 pca->af); 4176 break; 4177 case PF_ADDR_TABLE: 4178 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4179 newpa->addr.v.tblname); 4180 if (newpa->addr.p.tbl == NULL) 4181 error = ENOMEM; 4182 break; 4183 } 4184 if (error) 4185 goto DIOCCHANGEADDR_error; 4186 } 4187 4188 switch (pca->action) { 4189 case PF_CHANGE_ADD_HEAD: 4190 oldpa = TAILQ_FIRST(&pool->list); 4191 break; 4192 case PF_CHANGE_ADD_TAIL: 4193 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4194 break; 4195 default: 4196 oldpa = TAILQ_FIRST(&pool->list); 4197 for (int i = 0; oldpa && i < pca->nr; i++) 4198 oldpa = 
TAILQ_NEXT(oldpa, entries); 4199 4200 if (oldpa == NULL) 4201 ERROUT(EINVAL); 4202 } 4203 4204 if (pca->action == PF_CHANGE_REMOVE) { 4205 TAILQ_REMOVE(&pool->list, oldpa, entries); 4206 switch (oldpa->addr.type) { 4207 case PF_ADDR_DYNIFTL: 4208 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4209 break; 4210 case PF_ADDR_TABLE: 4211 pfr_detach_table(oldpa->addr.p.tbl); 4212 break; 4213 } 4214 if (oldpa->kif) 4215 pfi_kkif_unref(oldpa->kif); 4216 free(oldpa, M_PFRULE); 4217 } else { 4218 if (oldpa == NULL) 4219 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4220 else if (pca->action == PF_CHANGE_ADD_HEAD || 4221 pca->action == PF_CHANGE_ADD_BEFORE) 4222 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4223 else 4224 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4225 newpa, entries); 4226 } 4227 4228 pool->cur = TAILQ_FIRST(&pool->list); 4229 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4230 PF_RULES_WUNLOCK(); 4231 break; 4232 4233 #undef ERROUT 4234 DIOCCHANGEADDR_error: 4235 if (newpa != NULL) { 4236 if (newpa->kif) 4237 pfi_kkif_unref(newpa->kif); 4238 free(newpa, M_PFRULE); 4239 } 4240 PF_RULES_WUNLOCK(); 4241 pf_kkif_free(kif); 4242 break; 4243 } 4244 4245 case DIOCGETRULESETS: { 4246 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4247 struct pf_kruleset *ruleset; 4248 struct pf_kanchor *anchor; 4249 4250 pr->path[sizeof(pr->path) - 1] = 0; 4251 4252 PF_RULES_RLOCK(); 4253 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4254 PF_RULES_RUNLOCK(); 4255 error = ENOENT; 4256 break; 4257 } 4258 pr->nr = 0; 4259 if (ruleset->anchor == NULL) { 4260 /* XXX kludge for pf_main_ruleset */ 4261 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4262 if (anchor->parent == NULL) 4263 pr->nr++; 4264 } else { 4265 RB_FOREACH(anchor, pf_kanchor_node, 4266 &ruleset->anchor->children) 4267 pr->nr++; 4268 } 4269 PF_RULES_RUNLOCK(); 4270 break; 4271 } 4272 4273 case DIOCGETRULESET: { 4274 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4275 struct pf_kruleset *ruleset; 4276 struct pf_kanchor *anchor; 4277 u_int32_t nr = 0; 4278 4279 pr->path[sizeof(pr->path) - 1] = 0; 4280 4281 PF_RULES_RLOCK(); 4282 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4283 PF_RULES_RUNLOCK(); 4284 error = ENOENT; 4285 break; 4286 } 4287 pr->name[0] = 0; 4288 if (ruleset->anchor == NULL) { 4289 /* XXX kludge for pf_main_ruleset */ 4290 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4291 if (anchor->parent == NULL && nr++ == pr->nr) { 4292 strlcpy(pr->name, anchor->name, 4293 sizeof(pr->name)); 4294 break; 4295 } 4296 } else { 4297 RB_FOREACH(anchor, pf_kanchor_node, 4298 &ruleset->anchor->children) 4299 if (nr++ == pr->nr) { 4300 strlcpy(pr->name, anchor->name, 4301 sizeof(pr->name)); 4302 break; 4303 } 4304 } 4305 if (!pr->name[0]) 4306 error = EBUSY; 4307 PF_RULES_RUNLOCK(); 4308 break; 4309 } 4310 4311 case DIOCRCLRTABLES: { 4312 struct pfioc_table *io = (struct pfioc_table *)addr; 4313 4314 if (io->pfrio_esize != 0) { 4315 error = ENODEV; 4316 break; 4317 } 4318 PF_RULES_WLOCK(); 4319 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4320 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4321 PF_RULES_WUNLOCK(); 4322 break; 4323 } 4324 4325 case DIOCRADDTABLES: { 4326 struct pfioc_table *io = (struct pfioc_table *)addr; 4327 struct pfr_table *pfrts; 4328 size_t totlen; 4329 4330 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4331 error = ENODEV; 4332 break; 4333 } 4334 4335 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4336 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4337 error = ENOMEM; 4338 break; 4339 } 4340 4341 totlen = io->pfrio_size * sizeof(struct pfr_table); 4342 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4343 M_TEMP, M_WAITOK); 4344 error = copyin(io->pfrio_buffer, pfrts, totlen); 4345 if (error) { 4346 free(pfrts, M_TEMP); 4347 break; 4348 } 4349 PF_RULES_WLOCK(); 4350 error = pfr_add_tables(pfrts, io->pfrio_size, 4351 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4352 PF_RULES_WUNLOCK(); 4353 free(pfrts, M_TEMP); 4354 break; 4355 } 4356 4357 case DIOCRDELTABLES: { 4358 struct pfioc_table *io = (struct pfioc_table *)addr; 4359 struct pfr_table *pfrts; 4360 size_t totlen; 4361 4362 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4363 error = ENODEV; 4364 break; 4365 } 4366 4367 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4368 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4369 error = ENOMEM; 4370 break; 4371 } 4372 4373 totlen = io->pfrio_size * sizeof(struct pfr_table); 4374 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4375 M_TEMP, M_WAITOK); 4376 error = copyin(io->pfrio_buffer, pfrts, totlen); 4377 if (error) { 4378 free(pfrts, M_TEMP); 4379 break; 4380 } 4381 PF_RULES_WLOCK(); 4382 error = pfr_del_tables(pfrts, io->pfrio_size, 4383 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4384 PF_RULES_WUNLOCK(); 4385 free(pfrts, M_TEMP); 4386 break; 4387 } 4388 4389 case DIOCRGETTABLES: { 4390 struct pfioc_table *io = (struct pfioc_table *)addr; 4391 struct pfr_table *pfrts; 4392 size_t totlen; 4393 int n; 4394 4395 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4396 error = ENODEV; 4397 break; 4398 } 4399 PF_RULES_RLOCK(); 4400 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4401 if (n < 0) { 4402 PF_RULES_RUNLOCK(); 4403 error = EINVAL; 4404 break; 4405 } 4406 io->pfrio_size = min(io->pfrio_size, n); 4407 4408 totlen = io->pfrio_size * sizeof(struct pfr_table); 4409 4410 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4411 M_TEMP, M_NOWAIT | M_ZERO); 4412 if (pfrts == NULL) { 4413 error = ENOMEM; 4414 PF_RULES_RUNLOCK(); 4415 break; 4416 } 4417 error = pfr_get_tables(&io->pfrio_table, pfrts, 4418 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4419 PF_RULES_RUNLOCK(); 4420 if (error == 0) 4421 error = copyout(pfrts, io->pfrio_buffer, totlen); 4422 free(pfrts, M_TEMP); 4423 break; 4424 } 4425 4426 case DIOCRGETTSTATS: { 4427 struct pfioc_table *io = (struct pfioc_table *)addr; 4428 struct pfr_tstats *pfrtstats; 4429 size_t totlen; 4430 int n; 4431 4432 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4433 error = ENODEV; 4434 break; 4435 } 4436 PF_TABLE_STATS_LOCK(); 4437 PF_RULES_RLOCK(); 4438 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4439 if (n < 0) { 4440 PF_RULES_RUNLOCK(); 4441 PF_TABLE_STATS_UNLOCK(); 4442 error = EINVAL; 4443 break; 4444 } 4445 io->pfrio_size = min(io->pfrio_size, n); 4446 4447 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4448 pfrtstats = mallocarray(io->pfrio_size, 4449 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4450 if (pfrtstats == NULL) { 4451 error = ENOMEM; 4452 PF_RULES_RUNLOCK(); 4453 PF_TABLE_STATS_UNLOCK(); 4454 break; 4455 } 4456 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4457 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4458 PF_RULES_RUNLOCK(); 4459 PF_TABLE_STATS_UNLOCK(); 4460 if (error == 0) 4461 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4462 free(pfrtstats, M_TEMP); 4463 break; 4464 } 4465 4466 case 
DIOCRCLRTSTATS: { 4467 struct pfioc_table *io = (struct pfioc_table *)addr; 4468 struct pfr_table *pfrts; 4469 size_t totlen; 4470 4471 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4472 error = ENODEV; 4473 break; 4474 } 4475 4476 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4477 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4478 /* We used to count tables and use the minimum required 4479 * size, so we didn't fail on overly large requests. 4480 * Keep doing so. */ 4481 io->pfrio_size = pf_ioctl_maxcount; 4482 break; 4483 } 4484 4485 totlen = io->pfrio_size * sizeof(struct pfr_table); 4486 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4487 M_TEMP, M_WAITOK); 4488 error = copyin(io->pfrio_buffer, pfrts, totlen); 4489 if (error) { 4490 free(pfrts, M_TEMP); 4491 break; 4492 } 4493 4494 PF_TABLE_STATS_LOCK(); 4495 PF_RULES_RLOCK(); 4496 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4497 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4498 PF_RULES_RUNLOCK(); 4499 PF_TABLE_STATS_UNLOCK(); 4500 free(pfrts, M_TEMP); 4501 break; 4502 } 4503 4504 case DIOCRSETTFLAGS: { 4505 struct pfioc_table *io = (struct pfioc_table *)addr; 4506 struct pfr_table *pfrts; 4507 size_t totlen; 4508 int n; 4509 4510 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4511 error = ENODEV; 4512 break; 4513 } 4514 4515 PF_RULES_RLOCK(); 4516 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4517 if (n < 0) { 4518 PF_RULES_RUNLOCK(); 4519 error = EINVAL; 4520 break; 4521 } 4522 4523 io->pfrio_size = min(io->pfrio_size, n); 4524 PF_RULES_RUNLOCK(); 4525 4526 totlen = io->pfrio_size * sizeof(struct pfr_table); 4527 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4528 M_TEMP, M_WAITOK); 4529 error = copyin(io->pfrio_buffer, pfrts, totlen); 4530 if (error) { 4531 free(pfrts, M_TEMP); 4532 break; 4533 } 4534 PF_RULES_WLOCK(); 4535 error = pfr_set_tflags(pfrts, io->pfrio_size, 4536 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4537 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4538 PF_RULES_WUNLOCK(); 4539 free(pfrts, M_TEMP); 4540 break; 4541 } 4542 4543 case DIOCRCLRADDRS: { 4544 struct pfioc_table *io = (struct pfioc_table *)addr; 4545 4546 if (io->pfrio_esize != 0) { 4547 error = ENODEV; 4548 break; 4549 } 4550 PF_RULES_WLOCK(); 4551 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4552 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4553 PF_RULES_WUNLOCK(); 4554 break; 4555 } 4556 4557 case DIOCRADDADDRS: { 4558 struct pfioc_table *io = (struct pfioc_table *)addr; 4559 struct pfr_addr *pfras; 4560 size_t totlen; 4561 4562 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4563 error = ENODEV; 4564 break; 4565 } 4566 if (io->pfrio_size < 0 || 4567 io->pfrio_size > pf_ioctl_maxcount || 4568 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4569 error = EINVAL; 4570 break; 4571 } 4572 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4573 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4574 M_TEMP, M_WAITOK); 4575 error = copyin(io->pfrio_buffer, pfras, totlen); 4576 if (error) { 4577 free(pfras, M_TEMP); 4578 break; 4579 } 4580 PF_RULES_WLOCK(); 4581 error = pfr_add_addrs(&io->pfrio_table, pfras, 4582 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4583 PFR_FLAG_USERIOCTL); 4584 PF_RULES_WUNLOCK(); 4585 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4586 error = copyout(pfras, io->pfrio_buffer, totlen); 4587 free(pfras, M_TEMP); 4588 break; 4589 } 4590 4591 case DIOCRDELADDRS: { 4592 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4593 struct pfr_addr *pfras; 4594 size_t totlen; 4595 4596 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4597 error = ENODEV; 4598 break; 4599 } 4600 if (io->pfrio_size < 0 || 4601 io->pfrio_size > pf_ioctl_maxcount || 4602 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4603 error = EINVAL; 4604 break; 4605 } 4606 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4607 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4608 M_TEMP, M_WAITOK); 4609 error = copyin(io->pfrio_buffer, pfras, totlen); 4610 if (error) { 4611 free(pfras, M_TEMP); 4612 break; 4613 } 4614 PF_RULES_WLOCK(); 4615 error = pfr_del_addrs(&io->pfrio_table, pfras, 4616 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4617 PFR_FLAG_USERIOCTL); 4618 PF_RULES_WUNLOCK(); 4619 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4620 error = copyout(pfras, io->pfrio_buffer, totlen); 4621 free(pfras, M_TEMP); 4622 break; 4623 } 4624 4625 case DIOCRSETADDRS: { 4626 struct pfioc_table *io = (struct pfioc_table *)addr; 4627 struct pfr_addr *pfras; 4628 size_t totlen, count; 4629 4630 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4631 error = ENODEV; 4632 break; 4633 } 4634 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4635 error = EINVAL; 4636 break; 4637 } 4638 count = max(io->pfrio_size, io->pfrio_size2); 4639 if (count > pf_ioctl_maxcount || 4640 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4641 error = EINVAL; 4642 break; 4643 } 4644 totlen = count * sizeof(struct pfr_addr); 4645 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4646 M_WAITOK); 4647 error = copyin(io->pfrio_buffer, pfras, totlen); 4648 if (error) { 4649 free(pfras, M_TEMP); 4650 break; 4651 } 4652 PF_RULES_WLOCK(); 4653 error = pfr_set_addrs(&io->pfrio_table, pfras, 4654 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4655 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4656 PFR_FLAG_USERIOCTL, 0); 4657 PF_RULES_WUNLOCK(); 4658 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4659 error = copyout(pfras, io->pfrio_buffer, totlen); 4660 free(pfras, M_TEMP); 4661 break; 4662 } 4663 4664 case DIOCRGETADDRS: { 4665 struct pfioc_table *io = (struct pfioc_table *)addr; 4666 struct pfr_addr *pfras; 4667 size_t totlen; 4668 4669 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4670 error = ENODEV; 4671 break; 4672 } 4673 if (io->pfrio_size < 0 || 4674 io->pfrio_size > pf_ioctl_maxcount || 4675 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4676 error = EINVAL; 4677 break; 4678 } 4679 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4680 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4681 M_TEMP, M_WAITOK | M_ZERO); 4682 PF_RULES_RLOCK(); 4683 error = pfr_get_addrs(&io->pfrio_table, pfras, 4684 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4685 PF_RULES_RUNLOCK(); 4686 if (error == 0) 4687 error = copyout(pfras, io->pfrio_buffer, totlen); 4688 free(pfras, M_TEMP); 4689 break; 4690 } 4691 4692 case DIOCRGETASTATS: { 4693 struct pfioc_table *io = (struct pfioc_table *)addr; 4694 struct pfr_astats *pfrastats; 4695 size_t totlen; 4696 4697 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4698 error = ENODEV; 4699 break; 4700 } 4701 if (io->pfrio_size < 0 || 4702 io->pfrio_size > pf_ioctl_maxcount || 4703 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4704 error = EINVAL; 4705 break; 4706 } 4707 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4708 pfrastats = mallocarray(io->pfrio_size, 4709 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4710 PF_RULES_RLOCK(); 4711 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4712 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4713 PF_RULES_RUNLOCK(); 4714 if (error == 0) 4715 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4716 free(pfrastats, M_TEMP); 4717 break; 4718 } 4719 4720 case DIOCRCLRASTATS: { 4721 struct pfioc_table *io = (struct pfioc_table *)addr; 4722 struct pfr_addr *pfras; 4723 size_t totlen; 4724 4725 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4726 error = ENODEV; 4727 break; 4728 } 4729 if (io->pfrio_size < 0 || 4730 io->pfrio_size > pf_ioctl_maxcount || 4731 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4732 error = EINVAL; 4733 break; 4734 } 4735 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4736 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4737 M_TEMP, M_WAITOK); 4738 error = copyin(io->pfrio_buffer, pfras, totlen); 4739 if (error) { 4740 free(pfras, M_TEMP); 4741 break; 4742 } 4743 PF_RULES_WLOCK(); 4744 error = pfr_clr_astats(&io->pfrio_table, pfras, 4745 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 4746 PFR_FLAG_USERIOCTL); 4747 PF_RULES_WUNLOCK(); 4748 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4749 error = copyout(pfras, io->pfrio_buffer, totlen); 4750 free(pfras, M_TEMP); 4751 break; 4752 } 4753 4754 case DIOCRTSTADDRS: { 4755 struct pfioc_table *io = (struct pfioc_table *)addr; 4756 struct pfr_addr *pfras; 4757 size_t totlen; 4758 4759 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4760 error = ENODEV; 4761 break; 4762 } 4763 if (io->pfrio_size < 0 || 4764 io->pfrio_size > pf_ioctl_maxcount || 4765 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4766 error = EINVAL; 4767 break; 4768 } 4769 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4770 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4771 M_TEMP, M_WAITOK); 4772 error = copyin(io->pfrio_buffer, pfras, totlen); 4773 if (error) { 4774 free(pfras, M_TEMP); 4775 break; 4776 } 4777 PF_RULES_RLOCK(); 4778 error = pfr_tst_addrs(&io->pfrio_table, pfras, 4779 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 4780 PFR_FLAG_USERIOCTL); 4781 PF_RULES_RUNLOCK(); 4782 if (error == 0) 4783 error = copyout(pfras, io->pfrio_buffer, totlen); 4784 free(pfras, M_TEMP); 4785 break; 4786 } 4787 4788 case DIOCRINADEFINE: { 4789 struct pfioc_table *io = (struct pfioc_table *)addr; 4790 struct pfr_addr *pfras; 4791 size_t totlen; 4792 4793 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4794 error = ENODEV; 4795 break; 4796 } 4797 if (io->pfrio_size < 0 || 4798 io->pfrio_size > pf_ioctl_maxcount || 4799 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4800 error = EINVAL; 4801 break; 4802 } 4803 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4804 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4805 M_TEMP, M_WAITOK); 4806 error = copyin(io->pfrio_buffer, pfras, totlen); 4807 if (error) { 4808 free(pfras, M_TEMP); 4809 break; 4810 } 4811 PF_RULES_WLOCK(); 4812 error = pfr_ina_define(&io->pfrio_table, pfras, 4813 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 4814 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4815 PF_RULES_WUNLOCK(); 4816 free(pfras, M_TEMP); 4817 break; 4818 } 4819 4820 case DIOCOSFPADD: { 4821 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 4822 PF_RULES_WLOCK(); 4823 error = pf_osfp_add(io); 4824 PF_RULES_WUNLOCK(); 4825 break; 4826 } 4827 4828 case DIOCOSFPGET: { 4829 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr; 4830 PF_RULES_RLOCK(); 4831 error = pf_osfp_get(io); 4832 PF_RULES_RUNLOCK(); 4833 break; 4834 } 4835 4836 case DIOCXBEGIN: { 4837 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4838 struct pfioc_trans_e *ioes, *ioe; 4839 size_t totlen; 4840 int i; 4841 4842 if (io->esize != sizeof(*ioe)) { 4843 error = ENODEV; 4844 break; 4845 } 4846 if (io->size < 0 || 4847 io->size > pf_ioctl_maxcount || 4848 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4849 error = EINVAL; 4850 break; 4851 } 4852 totlen = sizeof(struct pfioc_trans_e) * io->size; 4853 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 4854 M_TEMP, M_WAITOK); 4855 error = copyin(io->array, ioes, totlen); 4856 if (error) { 4857 free(ioes, M_TEMP); 4858 break; 4859 } 4860 PF_RULES_WLOCK(); 4861 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4862 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 4863 switch (ioe->rs_num) { 4864 case PF_RULESET_ETH: 4865 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 4866 PF_RULES_WUNLOCK(); 4867 free(ioes, M_TEMP); 4868 goto fail; 4869 } 4870 break; 4871 #ifdef ALTQ 4872 case PF_RULESET_ALTQ: 4873 if (ioe->anchor[0]) { 4874 PF_RULES_WUNLOCK(); 4875 free(ioes, M_TEMP); 4876 error = EINVAL; 4877 goto fail; 4878 } 4879 if ((error = pf_begin_altq(&ioe->ticket))) { 4880 PF_RULES_WUNLOCK(); 4881 free(ioes, M_TEMP); 4882 goto fail; 4883 } 4884 break; 4885 #endif /* ALTQ */ 4886 case PF_RULESET_TABLE: 4887 { 4888 struct pfr_table table; 4889 4890 bzero(&table, sizeof(table)); 4891 strlcpy(table.pfrt_anchor, ioe->anchor, 4892 sizeof(table.pfrt_anchor)); 4893 if ((error = pfr_ina_begin(&table, 4894 &ioe->ticket, NULL, 0))) { 4895 PF_RULES_WUNLOCK(); 4896 free(ioes, M_TEMP); 4897 goto fail; 4898 } 4899 break; 4900 } 4901 default: 4902 if ((error = pf_begin_rules(&ioe->ticket, 4903 ioe->rs_num, ioe->anchor))) { 4904 PF_RULES_WUNLOCK(); 4905 free(ioes, M_TEMP); 4906 goto fail; 4907 } 4908 break; 4909 } 4910 } 4911 PF_RULES_WUNLOCK(); 4912 error = copyout(ioes, io->array, totlen); 4913 free(ioes, M_TEMP); 4914 break; 4915 } 4916 4917 case DIOCXROLLBACK: { 4918 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4919 struct pfioc_trans_e *ioe, *ioes; 4920 size_t totlen; 4921 int i; 4922 4923 if (io->esize != sizeof(*ioe)) { 4924 error = ENODEV; 4925 break; 4926 } 4927 if (io->size < 0 || 4928 io->size > pf_ioctl_maxcount || 4929 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4930 error = EINVAL; 4931 break; 4932 } 4933 totlen = sizeof(struct pfioc_trans_e) * io->size; 4934 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 4935 M_TEMP, M_WAITOK); 4936 error = copyin(io->array, ioes, totlen); 4937 if (error) { 4938 free(ioes, M_TEMP); 4939 break; 4940 } 4941 PF_RULES_WLOCK(); 4942 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4943 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 4944 switch (ioe->rs_num) { 4945 case PF_RULESET_ETH: 4946 if ((error = pf_rollback_eth(ioe->ticket, 4947 ioe->anchor))) { 4948 PF_RULES_WUNLOCK(); 4949 free(ioes, M_TEMP); 4950 goto fail; /* really bad */ 4951 } 4952 break; 4953 #ifdef ALTQ 4954 case PF_RULESET_ALTQ: 4955 if (ioe->anchor[0]) { 4956 PF_RULES_WUNLOCK(); 4957 free(ioes, M_TEMP); 4958 error = EINVAL; 4959 goto fail; 4960 } 4961 if ((error = pf_rollback_altq(ioe->ticket))) { 4962 PF_RULES_WUNLOCK(); 4963 free(ioes, M_TEMP); 4964 goto fail; /* really bad */ 4965 } 4966 break; 4967 #endif /* ALTQ */ 4968 case PF_RULESET_TABLE: 4969 { 4970 struct pfr_table table; 4971 4972 bzero(&table, 
sizeof(table)); 4973 strlcpy(table.pfrt_anchor, ioe->anchor, 4974 sizeof(table.pfrt_anchor)); 4975 if ((error = pfr_ina_rollback(&table, 4976 ioe->ticket, NULL, 0))) { 4977 PF_RULES_WUNLOCK(); 4978 free(ioes, M_TEMP); 4979 goto fail; /* really bad */ 4980 } 4981 break; 4982 } 4983 default: 4984 if ((error = pf_rollback_rules(ioe->ticket, 4985 ioe->rs_num, ioe->anchor))) { 4986 PF_RULES_WUNLOCK(); 4987 free(ioes, M_TEMP); 4988 goto fail; /* really bad */ 4989 } 4990 break; 4991 } 4992 } 4993 PF_RULES_WUNLOCK(); 4994 free(ioes, M_TEMP); 4995 break; 4996 } 4997 4998 case DIOCXCOMMIT: { 4999 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5000 struct pfioc_trans_e *ioe, *ioes; 5001 struct pf_kruleset *rs; 5002 struct pf_keth_ruleset *ers; 5003 size_t totlen; 5004 int i; 5005 5006 if (io->esize != sizeof(*ioe)) { 5007 error = ENODEV; 5008 break; 5009 } 5010 5011 if (io->size < 0 || 5012 io->size > pf_ioctl_maxcount || 5013 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5014 error = EINVAL; 5015 break; 5016 } 5017 5018 totlen = sizeof(struct pfioc_trans_e) * io->size; 5019 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5020 M_TEMP, M_WAITOK); 5021 error = copyin(io->array, ioes, totlen); 5022 if (error) { 5023 free(ioes, M_TEMP); 5024 break; 5025 } 5026 PF_RULES_WLOCK(); 5027 /* First make sure everything will succeed. */ 5028 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5029 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5030 switch (ioe->rs_num) { 5031 case PF_RULESET_ETH: 5032 ers = pf_find_keth_ruleset(ioe->anchor); 5033 if (ers == NULL || ioe->ticket == 0 || 5034 ioe->ticket != ers->inactive.ticket) { 5035 PF_RULES_WUNLOCK(); 5036 free(ioes, M_TEMP); 5037 error = EINVAL; 5038 goto fail; 5039 } 5040 break; 5041 #ifdef ALTQ 5042 case PF_RULESET_ALTQ: 5043 if (ioe->anchor[0]) { 5044 PF_RULES_WUNLOCK(); 5045 free(ioes, M_TEMP); 5046 error = EINVAL; 5047 goto fail; 5048 } 5049 if (!V_altqs_inactive_open || ioe->ticket != 5050 V_ticket_altqs_inactive) { 5051 PF_RULES_WUNLOCK(); 5052 free(ioes, M_TEMP); 5053 error = EBUSY; 5054 goto fail; 5055 } 5056 break; 5057 #endif /* ALTQ */ 5058 case PF_RULESET_TABLE: 5059 rs = pf_find_kruleset(ioe->anchor); 5060 if (rs == NULL || !rs->topen || ioe->ticket != 5061 rs->tticket) { 5062 PF_RULES_WUNLOCK(); 5063 free(ioes, M_TEMP); 5064 error = EBUSY; 5065 goto fail; 5066 } 5067 break; 5068 default: 5069 if (ioe->rs_num < 0 || ioe->rs_num >= 5070 PF_RULESET_MAX) { 5071 PF_RULES_WUNLOCK(); 5072 free(ioes, M_TEMP); 5073 error = EINVAL; 5074 goto fail; 5075 } 5076 rs = pf_find_kruleset(ioe->anchor); 5077 if (rs == NULL || 5078 !rs->rules[ioe->rs_num].inactive.open || 5079 rs->rules[ioe->rs_num].inactive.ticket != 5080 ioe->ticket) { 5081 PF_RULES_WUNLOCK(); 5082 free(ioes, M_TEMP); 5083 error = EBUSY; 5084 goto fail; 5085 } 5086 break; 5087 } 5088 } 5089 /* Now do the commit - no errors should happen here.
*/ 5090 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5091 switch (ioe->rs_num) { 5092 case PF_RULESET_ETH: 5093 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5094 PF_RULES_WUNLOCK(); 5095 free(ioes, M_TEMP); 5096 goto fail; /* really bad */ 5097 } 5098 break; 5099 #ifdef ALTQ 5100 case PF_RULESET_ALTQ: 5101 if ((error = pf_commit_altq(ioe->ticket))) { 5102 PF_RULES_WUNLOCK(); 5103 free(ioes, M_TEMP); 5104 goto fail; /* really bad */ 5105 } 5106 break; 5107 #endif /* ALTQ */ 5108 case PF_RULESET_TABLE: 5109 { 5110 struct pfr_table table; 5111 5112 bzero(&table, sizeof(table)); 5113 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5114 sizeof(table.pfrt_anchor)); 5115 if ((error = pfr_ina_commit(&table, 5116 ioe->ticket, NULL, NULL, 0))) { 5117 PF_RULES_WUNLOCK(); 5118 free(ioes, M_TEMP); 5119 goto fail; /* really bad */ 5120 } 5121 break; 5122 } 5123 default: 5124 if ((error = pf_commit_rules(ioe->ticket, 5125 ioe->rs_num, ioe->anchor))) { 5126 PF_RULES_WUNLOCK(); 5127 free(ioes, M_TEMP); 5128 goto fail; /* really bad */ 5129 } 5130 break; 5131 } 5132 } 5133 PF_RULES_WUNLOCK(); 5134 5135 /* Only hook into Ethernet traffic if we've got rules for it. */ 5136 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5137 hook_pf_eth(); 5138 else 5139 dehook_pf_eth(); 5140 5141 free(ioes, M_TEMP); 5142 break; 5143 } 5144 5145 case DIOCGETSRCNODES: { 5146 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5147 struct pf_srchash *sh; 5148 struct pf_ksrc_node *n; 5149 struct pf_src_node *p, *pstore; 5150 uint32_t i, nr = 0; 5151 5152 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5153 i++, sh++) { 5154 PF_HASHROW_LOCK(sh); 5155 LIST_FOREACH(n, &sh->nodes, entry) 5156 nr++; 5157 PF_HASHROW_UNLOCK(sh); 5158 } 5159 5160 psn->psn_len = min(psn->psn_len, 5161 sizeof(struct pf_src_node) * nr); 5162 5163 if (psn->psn_len == 0) { 5164 psn->psn_len = sizeof(struct pf_src_node) * nr; 5165 break; 5166 } 5167 5168 nr = 0; 5169 5170 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5171 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5172 i++, sh++) { 5173 PF_HASHROW_LOCK(sh); 5174 LIST_FOREACH(n, &sh->nodes, entry) { 5175 5176 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5177 break; 5178 5179 pf_src_node_copy(n, p); 5180 5181 p++; 5182 nr++; 5183 } 5184 PF_HASHROW_UNLOCK(sh); 5185 } 5186 error = copyout(pstore, psn->psn_src_nodes, 5187 sizeof(struct pf_src_node) * nr); 5188 if (error) { 5189 free(pstore, M_TEMP); 5190 break; 5191 } 5192 psn->psn_len = sizeof(struct pf_src_node) * nr; 5193 free(pstore, M_TEMP); 5194 break; 5195 } 5196 5197 case DIOCCLRSRCNODES: { 5198 pf_clear_srcnodes(NULL); 5199 pf_purge_expired_src_nodes(); 5200 break; 5201 } 5202 5203 case DIOCKILLSRCNODES: 5204 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5205 break; 5206 5207 #ifdef COMPAT_FREEBSD13 5208 case DIOCKEEPCOUNTERS_FREEBSD13: 5209 #endif 5210 case DIOCKEEPCOUNTERS: 5211 error = pf_keepcounters((struct pfioc_nv *)addr); 5212 break; 5213 5214 case DIOCGETSYNCOOKIES: 5215 error = pf_get_syncookies((struct pfioc_nv *)addr); 5216 break; 5217 5218 case DIOCSETSYNCOOKIES: 5219 error = pf_set_syncookies((struct pfioc_nv *)addr); 5220 break; 5221 5222 case DIOCSETHOSTID: { 5223 u_int32_t *hostid = (u_int32_t *)addr; 5224 5225 PF_RULES_WLOCK(); 5226 if (*hostid == 0) 5227 V_pf_status.hostid = arc4random(); 5228 else 5229 V_pf_status.hostid = *hostid; 5230 PF_RULES_WUNLOCK(); 5231 break; 5232 } 5233 5234 case DIOCOSFPFLUSH: 5235 PF_RULES_WLOCK(); 5236 pf_osfp_flush(); 5237
PF_RULES_WUNLOCK(); 5238 break; 5239 5240 case DIOCIGETIFACES: { 5241 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5242 struct pfi_kif *ifstore; 5243 size_t bufsiz; 5244 5245 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5246 error = ENODEV; 5247 break; 5248 } 5249 5250 if (io->pfiio_size < 0 || 5251 io->pfiio_size > pf_ioctl_maxcount || 5252 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5253 error = EINVAL; 5254 break; 5255 } 5256 5257 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5258 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5259 M_TEMP, M_WAITOK | M_ZERO); 5260 5261 PF_RULES_RLOCK(); 5262 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5263 PF_RULES_RUNLOCK(); 5264 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5265 free(ifstore, M_TEMP); 5266 break; 5267 } 5268 5269 case DIOCSETIFFLAG: { 5270 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5271 5272 PF_RULES_WLOCK(); 5273 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5274 PF_RULES_WUNLOCK(); 5275 break; 5276 } 5277 5278 case DIOCCLRIFFLAG: { 5279 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5280 5281 PF_RULES_WLOCK(); 5282 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5283 PF_RULES_WUNLOCK(); 5284 break; 5285 } 5286 5287 default: 5288 error = ENODEV; 5289 break; 5290 } 5291 fail: 5292 if (sx_xlocked(&pf_ioctl_lock)) 5293 sx_xunlock(&pf_ioctl_lock); 5294 CURVNET_RESTORE(); 5295 5296 #undef ERROUT_IOCTL 5297 5298 return (error); 5299 } 5300 5301 void 5302 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st) 5303 { 5304 bzero(sp, sizeof(struct pfsync_state)); 5305 5306 /* copy from state key */ 5307 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5308 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5309 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5310 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5311 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5312 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5313 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5314 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5315 sp->proto = st->key[PF_SK_WIRE]->proto; 5316 sp->af = st->key[PF_SK_WIRE]->af; 5317 5318 /* copy from state */ 5319 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5320 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5321 sp->creation = htonl(time_uptime - st->creation); 5322 sp->expire = pf_state_expires(st); 5323 if (sp->expire <= time_uptime) 5324 sp->expire = htonl(0); 5325 else 5326 sp->expire = htonl(sp->expire - time_uptime); 5327 5328 sp->direction = st->direction; 5329 sp->log = st->log; 5330 sp->timeout = st->timeout; 5331 sp->state_flags = st->state_flags; 5332 if (st->src_node) 5333 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5334 if (st->nat_src_node) 5335 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5336 5337 sp->id = st->id; 5338 sp->creatorid = st->creatorid; 5339 pf_state_peer_hton(&st->src, &sp->src); 5340 pf_state_peer_hton(&st->dst, &sp->dst); 5341 5342 if (st->rule.ptr == NULL) 5343 sp->rule = htonl(-1); 5344 else 5345 sp->rule = htonl(st->rule.ptr->nr); 5346 if (st->anchor.ptr == NULL) 5347 sp->anchor = htonl(-1); 5348 else 5349 sp->anchor = htonl(st->anchor.ptr->nr); 5350 if (st->nat_rule.ptr == NULL) 5351 sp->nat_rule = htonl(-1); 5352 else 5353 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5354 5355 pf_state_counter_hton(st->packets[0], sp->packets[0]); 5356 
pf_state_counter_hton(st->packets[1], sp->packets[1]); 5357 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 5358 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 5359 } 5360 5361 void 5362 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5363 { 5364 bzero(sp, sizeof(*sp)); 5365 5366 sp->version = PF_STATE_VERSION; 5367 5368 /* copy from state key */ 5369 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5370 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5371 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5372 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5373 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5374 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5375 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5376 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5377 sp->proto = st->key[PF_SK_WIRE]->proto; 5378 sp->af = st->key[PF_SK_WIRE]->af; 5379 5380 /* copy from state */ 5381 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5382 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5383 sizeof(sp->orig_ifname)); 5384 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5385 sp->creation = htonl(time_uptime - st->creation); 5386 sp->expire = pf_state_expires(st); 5387 if (sp->expire <= time_uptime) 5388 sp->expire = htonl(0); 5389 else 5390 sp->expire = htonl(sp->expire - time_uptime); 5391 5392 sp->direction = st->direction; 5393 sp->log = st->log; 5394 sp->timeout = st->timeout; 5395 sp->state_flags = st->state_flags; 5396 if (st->src_node) 5397 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5398 if (st->nat_src_node) 5399 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5400 5401 sp->id = st->id; 5402 sp->creatorid = st->creatorid; 5403 pf_state_peer_hton(&st->src, &sp->src); 5404 pf_state_peer_hton(&st->dst, &sp->dst); 5405 5406 if (st->rule.ptr == NULL) 5407 sp->rule = htonl(-1); 5408 else 5409 sp->rule = htonl(st->rule.ptr->nr); 5410 if (st->anchor.ptr == NULL) 5411 sp->anchor = htonl(-1); 5412 else 5413 sp->anchor = htonl(st->anchor.ptr->nr); 5414 if (st->nat_rule.ptr == NULL) 5415 sp->nat_rule = htonl(-1); 5416 else 5417 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5418 5419 sp->packets[0] = st->packets[0]; 5420 sp->packets[1] = st->packets[1]; 5421 sp->bytes[0] = st->bytes[0]; 5422 sp->bytes[1] = st->bytes[1]; 5423 } 5424 5425 static void 5426 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5427 { 5428 struct pfr_ktable *kt; 5429 5430 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5431 5432 kt = aw->p.tbl; 5433 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5434 kt = kt->pfrkt_root; 5435 aw->p.tbl = NULL; 5436 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
5437 kt->pfrkt_cnt : -1; 5438 } 5439 5440 static int 5441 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5442 size_t number, char **names) 5443 { 5444 nvlist_t *nvc; 5445 5446 nvc = nvlist_create(0); 5447 if (nvc == NULL) 5448 return (ENOMEM); 5449 5450 for (int i = 0; i < number; i++) { 5451 nvlist_append_number_array(nvc, "counters", 5452 counter_u64_fetch(counters[i])); 5453 nvlist_append_string_array(nvc, "names", 5454 names[i]); 5455 nvlist_append_number_array(nvc, "ids", 5456 i); 5457 } 5458 nvlist_add_nvlist(nvl, name, nvc); 5459 nvlist_destroy(nvc); 5460 5461 return (0); 5462 } 5463 5464 static int 5465 pf_getstatus(struct pfioc_nv *nv) 5466 { 5467 nvlist_t *nvl = NULL, *nvc = NULL; 5468 void *nvlpacked = NULL; 5469 int error; 5470 struct pf_status s; 5471 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5472 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5473 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5474 PF_RULES_RLOCK_TRACKER; 5475 5476 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5477 5478 PF_RULES_RLOCK(); 5479 5480 nvl = nvlist_create(0); 5481 if (nvl == NULL) 5482 ERROUT(ENOMEM); 5483 5484 nvlist_add_bool(nvl, "running", V_pf_status.running); 5485 nvlist_add_number(nvl, "since", V_pf_status.since); 5486 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5487 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5488 nvlist_add_number(nvl, "states", V_pf_status.states); 5489 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5490 5491 /* counters */ 5492 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5493 PFRES_MAX, pf_reasons); 5494 if (error != 0) 5495 ERROUT(error); 5496 5497 /* lcounters */ 5498 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5499 KLCNT_MAX, pf_lcounter); 5500 if (error != 0) 5501 ERROUT(error); 5502 5503 /* fcounters */ 5504 nvc = nvlist_create(0); 5505 if (nvc == NULL) 5506 ERROUT(ENOMEM); 5507 5508 for (int i = 0; i < FCNT_MAX; i++) { 5509 nvlist_append_number_array(nvc, "counters", 5510 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5511 nvlist_append_string_array(nvc, "names", 5512 pf_fcounter[i]); 5513 nvlist_append_number_array(nvc, "ids", 5514 i); 5515 } 5516 nvlist_add_nvlist(nvl, "fcounters", nvc); 5517 nvlist_destroy(nvc); 5518 nvc = NULL; 5519 5520 /* scounters */ 5521 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5522 SCNT_MAX, pf_fcounter); 5523 if (error != 0) 5524 ERROUT(error); 5525 5526 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5527 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5528 PF_MD5_DIGEST_LENGTH); 5529 5530 pfi_update_status(V_pf_status.ifname, &s); 5531 5532 /* pcounters / bcounters */ 5533 for (int i = 0; i < 2; i++) { 5534 for (int j = 0; j < 2; j++) { 5535 for (int k = 0; k < 2; k++) { 5536 nvlist_append_number_array(nvl, "pcounters", 5537 s.pcounters[i][j][k]); 5538 } 5539 nvlist_append_number_array(nvl, "bcounters", 5540 s.bcounters[i][j]); 5541 } 5542 } 5543 5544 nvlpacked = nvlist_pack(nvl, &nv->len); 5545 if (nvlpacked == NULL) 5546 ERROUT(ENOMEM); 5547 5548 if (nv->size == 0) 5549 ERROUT(0); 5550 else if (nv->size < nv->len) 5551 ERROUT(ENOSPC); 5552 5553 PF_RULES_RUNLOCK(); 5554 error = copyout(nvlpacked, nv->data, nv->len); 5555 goto done; 5556 5557 #undef ERROUT 5558 errout: 5559 PF_RULES_RUNLOCK(); 5560 done: 5561 free(nvlpacked, M_NVLIST); 5562 nvlist_destroy(nvc); 5563 nvlist_destroy(nvl); 5564 5565 return (error); 5566 } 5567 5568 /* 5569 * XXX - Check for version mismatch!!!
5570 */ 5571 static void 5572 pf_clear_all_states(void) 5573 { 5574 struct pf_kstate *s; 5575 u_int i; 5576 5577 for (i = 0; i <= pf_hashmask; i++) { 5578 struct pf_idhash *ih = &V_pf_idhash[i]; 5579 relock: 5580 PF_HASHROW_LOCK(ih); 5581 LIST_FOREACH(s, &ih->states, entry) { 5582 s->timeout = PFTM_PURGE; 5583 /* Don't send out individual delete messages. */ 5584 s->state_flags |= PFSTATE_NOSYNC; 5585 pf_unlink_state(s); 5586 goto relock; 5587 } 5588 PF_HASHROW_UNLOCK(ih); 5589 } 5590 } 5591 5592 static int 5593 pf_clear_tables(void) 5594 { 5595 struct pfioc_table io; 5596 int error; 5597 5598 bzero(&io, sizeof(io)); 5599 5600 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5601 io.pfrio_flags); 5602 5603 return (error); 5604 } 5605 5606 static void 5607 pf_clear_srcnodes(struct pf_ksrc_node *n) 5608 { 5609 struct pf_kstate *s; 5610 int i; 5611 5612 for (i = 0; i <= pf_hashmask; i++) { 5613 struct pf_idhash *ih = &V_pf_idhash[i]; 5614 5615 PF_HASHROW_LOCK(ih); 5616 LIST_FOREACH(s, &ih->states, entry) { 5617 if (n == NULL || n == s->src_node) 5618 s->src_node = NULL; 5619 if (n == NULL || n == s->nat_src_node) 5620 s->nat_src_node = NULL; 5621 } 5622 PF_HASHROW_UNLOCK(ih); 5623 } 5624 5625 if (n == NULL) { 5626 struct pf_srchash *sh; 5627 5628 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5629 i++, sh++) { 5630 PF_HASHROW_LOCK(sh); 5631 LIST_FOREACH(n, &sh->nodes, entry) { 5632 n->expire = 1; 5633 n->states = 0; 5634 } 5635 PF_HASHROW_UNLOCK(sh); 5636 } 5637 } else { 5638 /* XXX: hash slot should already be locked here. */ 5639 n->expire = 1; 5640 n->states = 0; 5641 } 5642 } 5643 5644 static void 5645 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5646 { 5647 struct pf_ksrc_node_list kill; 5648 5649 LIST_INIT(&kill); 5650 for (int i = 0; i <= pf_srchashmask; i++) { 5651 struct pf_srchash *sh = &V_pf_srchash[i]; 5652 struct pf_ksrc_node *sn, *tmp; 5653 5654 PF_HASHROW_LOCK(sh); 5655 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5656 if (PF_MATCHA(psnk->psnk_src.neg, 5657 &psnk->psnk_src.addr.v.a.addr, 5658 &psnk->psnk_src.addr.v.a.mask, 5659 &sn->addr, sn->af) && 5660 PF_MATCHA(psnk->psnk_dst.neg, 5661 &psnk->psnk_dst.addr.v.a.addr, 5662 &psnk->psnk_dst.addr.v.a.mask, 5663 &sn->raddr, sn->af)) { 5664 pf_unlink_src_node(sn); 5665 LIST_INSERT_HEAD(&kill, sn, entry); 5666 sn->expire = 1; 5667 } 5668 PF_HASHROW_UNLOCK(sh); 5669 } 5670 5671 for (int i = 0; i <= pf_hashmask; i++) { 5672 struct pf_idhash *ih = &V_pf_idhash[i]; 5673 struct pf_kstate *s; 5674 5675 PF_HASHROW_LOCK(ih); 5676 LIST_FOREACH(s, &ih->states, entry) { 5677 if (s->src_node && s->src_node->expire == 1) 5678 s->src_node = NULL; 5679 if (s->nat_src_node && s->nat_src_node->expire == 1) 5680 s->nat_src_node = NULL; 5681 } 5682 PF_HASHROW_UNLOCK(ih); 5683 } 5684 5685 psnk->psnk_killed = pf_free_src_nodes(&kill); 5686 } 5687 5688 static int 5689 pf_keepcounters(struct pfioc_nv *nv) 5690 { 5691 nvlist_t *nvl = NULL; 5692 void *nvlpacked = NULL; 5693 int error = 0; 5694 5695 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5696 5697 if (nv->len > pf_ioctl_maxcount) 5698 ERROUT(ENOMEM); 5699 5700 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 5701 if (nvlpacked == NULL) 5702 ERROUT(ENOMEM); 5703 5704 error = copyin(nv->data, nvlpacked, nv->len); 5705 if (error) 5706 ERROUT(error); 5707 5708 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5709 if (nvl == NULL) 5710 ERROUT(EBADMSG); 5711 5712 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 5713 ERROUT(EBADMSG); 5714 5715 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 5716 5717 on_error: 5718 nvlist_destroy(nvl); 5719 free(nvlpacked, M_TEMP); 5720 return (error); 5721 } 5722 5723 static unsigned int 5724 pf_clear_states(const struct pf_kstate_kill *kill) 5725 { 5726 struct pf_state_key_cmp match_key; 5727 struct pf_kstate *s; 5728 struct pfi_kkif *kif; 5729 int idx; 5730 unsigned int killed = 0, dir; 5731 5732 for (unsigned int i = 0; i <= pf_hashmask; i++) { 5733 struct pf_idhash *ih = &V_pf_idhash[i]; 5734 5735 relock_DIOCCLRSTATES: 5736 PF_HASHROW_LOCK(ih); 5737 LIST_FOREACH(s, &ih->states, entry) { 5738 /* For floating states look at the original kif. */ 5739 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 5740 5741 if (kill->psk_ifname[0] && 5742 strcmp(kill->psk_ifname, 5743 kif->pfik_name)) 5744 continue; 5745 5746 if (kill->psk_kill_match) { 5747 bzero(&match_key, sizeof(match_key)); 5748 5749 if (s->direction == PF_OUT) { 5750 dir = PF_IN; 5751 idx = PF_SK_STACK; 5752 } else { 5753 dir = PF_OUT; 5754 idx = PF_SK_WIRE; 5755 } 5756 5757 match_key.af = s->key[idx]->af; 5758 match_key.proto = s->key[idx]->proto; 5759 PF_ACPY(&match_key.addr[0], 5760 &s->key[idx]->addr[1], match_key.af); 5761 match_key.port[0] = s->key[idx]->port[1]; 5762 PF_ACPY(&match_key.addr[1], 5763 &s->key[idx]->addr[0], match_key.af); 5764 match_key.port[1] = s->key[idx]->port[0]; 5765 } 5766 5767 /* 5768 * Don't send out individual 5769 * delete messages. 5770 */ 5771 s->state_flags |= PFSTATE_NOSYNC; 5772 pf_unlink_state(s); 5773 killed++; 5774 5775 if (kill->psk_kill_match) 5776 killed += pf_kill_matching_state(&match_key, 5777 dir); 5778 5779 goto relock_DIOCCLRSTATES; 5780 } 5781 PF_HASHROW_UNLOCK(ih); 5782 } 5783 5784 if (V_pfsync_clear_states_ptr != NULL) 5785 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 5786 5787 return (killed); 5788 } 5789 5790 static void 5791 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 5792 { 5793 struct pf_kstate *s; 5794 5795 if (kill->psk_pfcmp.id) { 5796 if (kill->psk_pfcmp.creatorid == 0) 5797 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 5798 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 5799 kill->psk_pfcmp.creatorid))) { 5800 pf_unlink_state(s); 5801 *killed = 1; 5802 } 5803 return; 5804 } 5805 5806 for (unsigned int i = 0; i <= pf_hashmask; i++) 5807 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 5808 5809 return; 5810 } 5811 5812 static int 5813 pf_killstates_nv(struct pfioc_nv *nv) 5814 { 5815 struct pf_kstate_kill kill; 5816 nvlist_t *nvl = NULL; 5817 void *nvlpacked = NULL; 5818 int error = 0; 5819 unsigned int killed = 0; 5820 5821 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5822 5823 if (nv->len > pf_ioctl_maxcount) 5824 ERROUT(ENOMEM); 5825 5826 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5827 if (nvlpacked == NULL) 5828 ERROUT(ENOMEM); 5829 5830 error = copyin(nv->data, nvlpacked, nv->len); 5831 if (error) 5832 ERROUT(error); 5833 5834 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5835 if (nvl == NULL) 5836 ERROUT(EBADMSG); 5837 5838 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 5839 if (error) 5840 ERROUT(error); 5841 5842 pf_killstates(&kill, &killed); 5843 5844 free(nvlpacked, M_NVLIST); 5845 nvlpacked = NULL; 5846 nvlist_destroy(nvl); 5847 nvl = nvlist_create(0); 5848 if (nvl == NULL) 5849 ERROUT(ENOMEM); 5850 5851 nvlist_add_number(nvl, "killed", killed); 5852 5853 nvlpacked = nvlist_pack(nvl, &nv->len); 5854 if (nvlpacked 
== NULL) 5855 ERROUT(ENOMEM); 5856 5857 if (nv->size == 0) 5858 ERROUT(0); 5859 else if (nv->size < nv->len) 5860 ERROUT(ENOSPC); 5861 5862 error = copyout(nvlpacked, nv->data, nv->len); 5863 5864 on_error: 5865 nvlist_destroy(nvl); 5866 free(nvlpacked, M_NVLIST); 5867 return (error); 5868 } 5869 5870 static int 5871 pf_clearstates_nv(struct pfioc_nv *nv) 5872 { 5873 struct pf_kstate_kill kill; 5874 nvlist_t *nvl = NULL; 5875 void *nvlpacked = NULL; 5876 int error = 0; 5877 unsigned int killed; 5878 5879 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5880 5881 if (nv->len > pf_ioctl_maxcount) 5882 ERROUT(ENOMEM); 5883 5884 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5885 if (nvlpacked == NULL) 5886 ERROUT(ENOMEM); 5887 5888 error = copyin(nv->data, nvlpacked, nv->len); 5889 if (error) 5890 ERROUT(error); 5891 5892 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5893 if (nvl == NULL) 5894 ERROUT(EBADMSG); 5895 5896 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 5897 if (error) 5898 ERROUT(error); 5899 5900 killed = pf_clear_states(&kill); 5901 5902 free(nvlpacked, M_NVLIST); 5903 nvlpacked = NULL; 5904 nvlist_destroy(nvl); 5905 nvl = nvlist_create(0); 5906 if (nvl == NULL) 5907 ERROUT(ENOMEM); 5908 5909 nvlist_add_number(nvl, "killed", killed); 5910 5911 nvlpacked = nvlist_pack(nvl, &nv->len); 5912 if (nvlpacked == NULL) 5913 ERROUT(ENOMEM); 5914 5915 if (nv->size == 0) 5916 ERROUT(0); 5917 else if (nv->size < nv->len) 5918 ERROUT(ENOSPC); 5919 5920 error = copyout(nvlpacked, nv->data, nv->len); 5921 5922 #undef ERROUT 5923 on_error: 5924 nvlist_destroy(nvl); 5925 free(nvlpacked, M_NVLIST); 5926 return (error); 5927 } 5928 5929 static int 5930 pf_getstate(struct pfioc_nv *nv) 5931 { 5932 nvlist_t *nvl = NULL, *nvls; 5933 void *nvlpacked = NULL; 5934 struct pf_kstate *s = NULL; 5935 int error = 0; 5936 uint64_t id, creatorid; 5937 5938 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5939 5940 if (nv->len > pf_ioctl_maxcount) 5941 ERROUT(ENOMEM); 5942 5943 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5944 if (nvlpacked == NULL) 5945 ERROUT(ENOMEM); 5946 5947 error = copyin(nv->data, nvlpacked, nv->len); 5948 if (error) 5949 ERROUT(error); 5950 5951 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5952 if (nvl == NULL) 5953 ERROUT(EBADMSG); 5954 5955 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 5956 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 5957 5958 s = pf_find_state_byid(id, creatorid); 5959 if (s == NULL) 5960 ERROUT(ENOENT); 5961 5962 free(nvlpacked, M_NVLIST); 5963 nvlpacked = NULL; 5964 nvlist_destroy(nvl); 5965 nvl = nvlist_create(0); 5966 if (nvl == NULL) 5967 ERROUT(ENOMEM); 5968 5969 nvls = pf_state_to_nvstate(s); 5970 if (nvls == NULL) 5971 ERROUT(ENOMEM); 5972 5973 nvlist_add_nvlist(nvl, "state", nvls); 5974 nvlist_destroy(nvls); 5975 5976 nvlpacked = nvlist_pack(nvl, &nv->len); 5977 if (nvlpacked == NULL) 5978 ERROUT(ENOMEM); 5979 5980 if (nv->size == 0) 5981 ERROUT(0); 5982 else if (nv->size < nv->len) 5983 ERROUT(ENOSPC); 5984 5985 error = copyout(nvlpacked, nv->data, nv->len); 5986 5987 #undef ERROUT 5988 errout: 5989 if (s != NULL) 5990 PF_STATE_UNLOCK(s); 5991 free(nvlpacked, M_NVLIST); 5992 nvlist_destroy(nvl); 5993 return (error); 5994 } 5995 5996 /* 5997 * XXX - Check for version mismatch!!! 5998 */ 5999 6000 /* 6001 * Duplicate pfctl -Fa operation to get rid of as much as we can.
6002 */ 6003 static int 6004 shutdown_pf(void) 6005 { 6006 int error = 0; 6007 u_int32_t t[5]; 6008 char nn = '\0'; 6009 6010 do { 6011 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6012 != 0) { 6013 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6014 break; 6015 } 6016 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6017 != 0) { 6018 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6019 break; /* XXX: rollback? */ 6020 } 6021 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6022 != 0) { 6023 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6024 break; /* XXX: rollback? */ 6025 } 6026 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6027 != 0) { 6028 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6029 break; /* XXX: rollback? */ 6030 } 6031 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6032 != 0) { 6033 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6034 break; /* XXX: rollback? */ 6035 } 6036 6037 /* XXX: these should always succeed here */ 6038 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6039 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6040 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6041 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6042 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6043 6044 if ((error = pf_clear_tables()) != 0) 6045 break; 6046 6047 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6048 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6049 break; 6050 } 6051 pf_commit_eth(t[0], &nn); 6052 6053 #ifdef ALTQ 6054 if ((error = pf_begin_altq(&t[0])) != 0) { 6055 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6056 break; 6057 } 6058 pf_commit_altq(t[0]); 6059 #endif 6060 6061 pf_clear_all_states(); 6062 6063 pf_clear_srcnodes(NULL); 6064 6065 /* status does not use malloced mem so no need to cleanup */ 6066 /* fingerprints and interfaces have their own cleanup code */ 6067 } while(0); 6068 6069 return (error); 6070 } 6071 6072 static pfil_return_t 6073 pf_check_return(int chk, struct mbuf **m) 6074 { 6075 6076 switch (chk) { 6077 case PF_PASS: 6078 if (*m == NULL) 6079 return (PFIL_CONSUMED); 6080 else 6081 return (PFIL_PASS); 6082 break; 6083 default: 6084 if (*m != NULL) { 6085 m_freem(*m); 6086 *m = NULL; 6087 } 6088 return (PFIL_DROPPED); 6089 } 6090 } 6091 6092 static pfil_return_t 6093 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6094 void *ruleset __unused, struct inpcb *inp) 6095 { 6096 int chk; 6097 6098 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6099 6100 return (pf_check_return(chk, m)); 6101 } 6102 6103 static pfil_return_t 6104 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6105 void *ruleset __unused, struct inpcb *inp) 6106 { 6107 int chk; 6108 6109 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6110 6111 return (pf_check_return(chk, m)); 6112 } 6113 6114 #ifdef INET 6115 static pfil_return_t 6116 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6117 void *ruleset __unused, struct inpcb *inp) 6118 { 6119 int chk; 6120 6121 chk = pf_test(PF_IN, flags, ifp, m, inp); 6122 6123 return (pf_check_return(chk, m)); 6124 } 6125 6126 static pfil_return_t 6127 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6128 void *ruleset __unused, struct inpcb *inp) 6129 { 6130 int chk; 6131 6132 chk = pf_test(PF_OUT, flags, ifp, m, inp); 6133 6134 return (pf_check_return(chk, m)); 6135 } 6136 #endif 6137 6138 #ifdef INET6 6139 static pfil_return_t 6140 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6141 void *ruleset __unused, struct 
inpcb *inp) 6142 { 6143 int chk; 6144 6145 /* 6146 * In case of loopback traffic, IPv6 uses the real interface in 6147 * order to support scoped addresses. In order to support stateful 6148 * filtering we have to change this to lo0, as is the case in IPv4. 6149 */ 6150 CURVNET_SET(ifp->if_vnet); 6151 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp); 6152 CURVNET_RESTORE(); 6153 6154 return (pf_check_return(chk, m)); 6155 } 6156 6157 static pfil_return_t 6158 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, 6159 void *ruleset __unused, struct inpcb *inp) 6160 { 6161 int chk; 6162 6163 CURVNET_SET(ifp->if_vnet); 6164 chk = pf_test6(PF_OUT, flags, ifp, m, inp); 6165 CURVNET_RESTORE(); 6166 6167 return (pf_check_return(chk, m)); 6168 } 6169 #endif /* INET6 */ 6170 6171 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook); 6172 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook); 6173 #define V_pf_eth_in_hook VNET(pf_eth_in_hook) 6174 #define V_pf_eth_out_hook VNET(pf_eth_out_hook) 6175 6176 #ifdef INET 6177 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook); 6178 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook); 6179 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook) 6180 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook) 6181 #endif 6182 #ifdef INET6 6183 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook); 6184 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook); 6185 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook) 6186 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook) 6187 #endif 6188 6189 static void 6190 hook_pf_eth(void) 6191 { 6192 struct pfil_hook_args pha; 6193 struct pfil_link_args pla; 6194 int ret __diagused; 6195 6196 if (V_pf_pfil_eth_hooked) 6197 return; 6198 6199 pha.pa_version = PFIL_VERSION; 6200 pha.pa_modname = "pf"; 6201 pha.pa_ruleset = NULL; 6202 6203 pla.pa_version = PFIL_VERSION; 6204 6205 pha.pa_type = PFIL_TYPE_ETHERNET; 6206 pha.pa_func = pf_eth_check_in; 6207 pha.pa_flags = PFIL_IN; 6208 pha.pa_rulname = "eth-in"; 6209 V_pf_eth_in_hook = pfil_add_hook(&pha); 6210 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6211 pla.pa_head = V_link_pfil_head; 6212 pla.pa_hook = V_pf_eth_in_hook; 6213 ret = pfil_link(&pla); 6214 MPASS(ret == 0); 6215 pha.pa_func = pf_eth_check_out; 6216 pha.pa_flags = PFIL_OUT; 6217 pha.pa_rulname = "eth-out"; 6218 V_pf_eth_out_hook = pfil_add_hook(&pha); 6219 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6220 pla.pa_head = V_link_pfil_head; 6221 pla.pa_hook = V_pf_eth_out_hook; 6222 ret = pfil_link(&pla); 6223 MPASS(ret == 0); 6224 6225 V_pf_pfil_eth_hooked = 1; 6226 } 6227 6228 static void 6229 hook_pf(void) 6230 { 6231 struct pfil_hook_args pha; 6232 struct pfil_link_args pla; 6233 int ret; 6234 6235 if (V_pf_pfil_hooked) 6236 return; 6237 6238 pha.pa_version = PFIL_VERSION; 6239 pha.pa_modname = "pf"; 6240 pha.pa_ruleset = NULL; 6241 6242 pla.pa_version = PFIL_VERSION; 6243 6244 #ifdef INET 6245 pha.pa_type = PFIL_TYPE_IP4; 6246 pha.pa_func = pf_check_in; 6247 pha.pa_flags = PFIL_IN; 6248 pha.pa_rulname = "default-in"; 6249 V_pf_ip4_in_hook = pfil_add_hook(&pha); 6250 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6251 pla.pa_head = V_inet_pfil_head; 6252 pla.pa_hook = V_pf_ip4_in_hook; 6253 ret = pfil_link(&pla); 6254 MPASS(ret == 0); 6255 pha.pa_func = pf_check_out; 6256 pha.pa_flags = PFIL_OUT; 6257 pha.pa_rulname = "default-out"; 6258 V_pf_ip4_out_hook = pfil_add_hook(&pha); 6259 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6260 pla.pa_head = V_inet_pfil_head; 6261 pla.pa_hook = V_pf_ip4_out_hook;
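/*
 * Note: pfil_add_hook() above only registers a hook object; packets do
 * not start flowing through pf_check_out() until the pfil_link() call
 * below attaches the hook to the inet pfil head.  The same two-step
 * register-then-link pattern is used for every hook in this function.
 */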
6262 ret = pfil_link(&pla); 6263 MPASS(ret == 0); 6264 #endif 6265 #ifdef INET6 6266 pha.pa_type = PFIL_TYPE_IP6; 6267 pha.pa_func = pf_check6_in; 6268 pha.pa_flags = PFIL_IN; 6269 pha.pa_rulname = "default-in6"; 6270 V_pf_ip6_in_hook = pfil_add_hook(&pha); 6271 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6272 pla.pa_head = V_inet6_pfil_head; 6273 pla.pa_hook = V_pf_ip6_in_hook; 6274 ret = pfil_link(&pla); 6275 MPASS(ret == 0); 6276 pha.pa_func = pf_check6_out; 6277 pha.pa_rulname = "default-out6"; 6278 pha.pa_flags = PFIL_OUT; 6279 V_pf_ip6_out_hook = pfil_add_hook(&pha); 6280 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6281 pla.pa_head = V_inet6_pfil_head; 6282 pla.pa_hook = V_pf_ip6_out_hook; 6283 ret = pfil_link(&pla); 6284 MPASS(ret == 0); 6285 #endif 6286 6287 V_pf_pfil_hooked = 1; 6288 } 6289 6290 static void 6291 dehook_pf_eth(void) 6292 { 6293 6294 if (V_pf_pfil_eth_hooked == 0) 6295 return; 6296 6297 pfil_remove_hook(V_pf_eth_in_hook); 6298 pfil_remove_hook(V_pf_eth_out_hook); 6299 6300 V_pf_pfil_eth_hooked = 0; 6301 } 6302 6303 static void 6304 dehook_pf(void) 6305 { 6306 6307 if (V_pf_pfil_hooked == 0) 6308 return; 6309 6310 #ifdef INET 6311 pfil_remove_hook(V_pf_ip4_in_hook); 6312 pfil_remove_hook(V_pf_ip4_out_hook); 6313 #endif 6314 #ifdef INET6 6315 pfil_remove_hook(V_pf_ip6_in_hook); 6316 pfil_remove_hook(V_pf_ip6_out_hook); 6317 #endif 6318 6319 V_pf_pfil_hooked = 0; 6320 } 6321 6322 static void 6323 pf_load_vnet(void) 6324 { 6325 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname), 6326 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 6327 6328 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize, 6329 PF_RULE_TAG_HASH_SIZE_DEFAULT); 6330 #ifdef ALTQ 6331 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize, 6332 PF_QUEUE_TAG_HASH_SIZE_DEFAULT); 6333 #endif 6334 6335 V_pf_keth = &V_pf_main_keth_anchor.ruleset; 6336 6337 pfattach_vnet(); 6338 V_pf_vnet_active = 1; 6339 } 6340 6341 static int 6342 pf_load(void) 6343 { 6344 int error; 6345 6346 rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE); 6347 sx_init(&pf_ioctl_lock, "pf ioctl"); 6348 sx_init(&pf_end_lock, "pf end thread"); 6349 6350 pf_mtag_initialize(); 6351 6352 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME); 6353 if (pf_dev == NULL) 6354 return (ENOMEM); 6355 6356 pf_end_threads = 0; 6357 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge"); 6358 if (error != 0) 6359 return (error); 6360 6361 pfi_initialize(); 6362 6363 return (0); 6364 } 6365 6366 static void 6367 pf_unload_vnet(void) 6368 { 6369 int ret; 6370 6371 V_pf_vnet_active = 0; 6372 V_pf_status.running = 0; 6373 dehook_pf(); 6374 dehook_pf_eth(); 6375 6376 PF_RULES_WLOCK(); 6377 pf_syncookies_cleanup(); 6378 shutdown_pf(); 6379 PF_RULES_WUNLOCK(); 6380 6381 ret = swi_remove(V_pf_swi_cookie); 6382 MPASS(ret == 0); 6383 ret = intr_event_destroy(V_pf_swi_ie); 6384 MPASS(ret == 0); 6385 6386 pf_unload_vnet_purge(); 6387 6388 pf_normalize_cleanup(); 6389 PF_RULES_WLOCK(); 6390 pfi_cleanup_vnet(); 6391 PF_RULES_WUNLOCK(); 6392 pfr_cleanup(); 6393 pf_osfp_flush(); 6394 pf_cleanup(); 6395 if (IS_DEFAULT_VNET(curvnet)) 6396 pf_mtag_cleanup(); 6397 6398 pf_cleanup_tagset(&V_pf_tags); 6399 #ifdef ALTQ 6400 pf_cleanup_tagset(&V_pf_qids); 6401 #endif 6402 uma_zdestroy(V_pf_tag_z); 6403 6404 #ifdef PF_WANT_32_TO_64_COUNTER 6405 PF_RULES_WLOCK(); 6406 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); 6407 6408 MPASS(LIST_EMPTY(&V_pf_allkiflist)); 6409 MPASS(V_pf_allkifcount == 0); 6410 6411 
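/*
 * Unlink the default rule and the allrulelist iteration marker from
 * the global rule list; the marker is freed a few lines below, and the
 * default rule's counters are released at the end of this function.
 */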
LIST_REMOVE(&V_pf_default_rule, allrulelist); 6412 V_pf_allrulecount--; 6413 LIST_REMOVE(V_pf_rulemarker, allrulelist); 6414 6415 /* 6416 * There are known pf rule leaks when running the test suite. 6417 */ 6418 #ifdef notyet 6419 MPASS(LIST_EMPTY(&V_pf_allrulelist)); 6420 MPASS(V_pf_allrulecount == 0); 6421 #endif 6422 6423 PF_RULES_WUNLOCK(); 6424 6425 free(V_pf_kifmarker, PFI_MTYPE); 6426 free(V_pf_rulemarker, M_PFRULE); 6427 #endif 6428 6429 /* Free counters last as we updated them during shutdown. */ 6430 pf_counter_u64_deinit(&V_pf_default_rule.evaluations); 6431 for (int i = 0; i < 2; i++) { 6432 pf_counter_u64_deinit(&V_pf_default_rule.packets[i]); 6433 pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]); 6434 } 6435 counter_u64_free(V_pf_default_rule.states_cur); 6436 counter_u64_free(V_pf_default_rule.states_tot); 6437 counter_u64_free(V_pf_default_rule.src_nodes); 6438 6439 for (int i = 0; i < PFRES_MAX; i++) 6440 counter_u64_free(V_pf_status.counters[i]); 6441 for (int i = 0; i < KLCNT_MAX; i++) 6442 counter_u64_free(V_pf_status.lcounters[i]); 6443 for (int i = 0; i < FCNT_MAX; i++) 6444 pf_counter_u64_deinit(&V_pf_status.fcounters[i]); 6445 for (int i = 0; i < SCNT_MAX; i++) 6446 counter_u64_free(V_pf_status.scounters[i]); 6447 } 6448 6449 static void 6450 pf_unload(void) 6451 { 6452 6453 sx_xlock(&pf_end_lock); 6454 pf_end_threads = 1; 6455 while (pf_end_threads < 2) { 6456 wakeup_one(pf_purge_thread); 6457 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0); 6458 } 6459 sx_xunlock(&pf_end_lock); 6460 6461 if (pf_dev != NULL) 6462 destroy_dev(pf_dev); 6463 6464 pfi_cleanup(); 6465 6466 rm_destroy(&pf_rules_lock); 6467 sx_destroy(&pf_ioctl_lock); 6468 sx_destroy(&pf_end_lock); 6469 } 6470 6471 static void 6472 vnet_pf_init(void *unused __unused) 6473 { 6474 6475 pf_load_vnet(); 6476 } 6477 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6478 vnet_pf_init, NULL); 6479 6480 static void 6481 vnet_pf_uninit(const void *unused __unused) 6482 { 6483 6484 pf_unload_vnet(); 6485 } 6486 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL); 6487 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6488 vnet_pf_uninit, NULL); 6489 6490 static int 6491 pf_modevent(module_t mod, int type, void *data) 6492 { 6493 int error = 0; 6494 6495 switch(type) { 6496 case MOD_LOAD: 6497 error = pf_load(); 6498 break; 6499 case MOD_UNLOAD: 6500 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after 6501 * the vnet_pf_uninit()s */ 6502 break; 6503 default: 6504 error = EINVAL; 6505 break; 6506 } 6507 6508 return (error); 6509 } 6510 6511 static moduledata_t pf_mod = { 6512 "pf", 6513 pf_modevent, 6514 0 6515 }; 6516 6517 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND); 6518 MODULE_VERSION(pf, PF_MODVER); 6519
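/*
 * Userland sketch (illustrative only, not part of this module): the
 * nvlist-based ioctls above share one buffer protocol -- the caller
 * supplies a buffer in nv.data/nv.size, the kernel packs an nvlist
 * reply and records its length in nv.len, and size == 0 acts as a pure
 * length probe (the ERROUT(0) paths in pf_getstatus() and
 * pf_getstate()).  Assuming pf_getstatus() is reached via the
 * DIOCGETSTATUSNV command, a minimal caller looks roughly like:
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *	char buf[16384];
 *	struct pfioc_nv nv = { .data = buf, .size = sizeof(buf) };
 *
 *	if (ioctl(dev, DIOCGETSTATUSNV, &nv) == 0)
 *		// buf now holds a packed nvlist of nv.len bytes
 *		nvlist_unpack(buf, nv.len, 0);
 */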