/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

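/*
 * V_pf_default_rule is the per-vnet catch-all rule applied when no other
 * rule matches.  It is set up in pfattach_vnet() and must never be freed
 * or garbage collected.
 */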
VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int	 pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
volatile VNET_DEFINE_STATIC(int, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock		pf_rules_lock;
struct sx		pf_ioctl_lock;
struct sx		pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t		*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;
	V_pf_pfil_eth_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

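/*
 * Look up the address pool of a rule in the given anchor.  "ticket"
 * guards against a concurrently restarted transaction (checked only if
 * "check_ticket" is set), "active" selects the active vs. inactive
 * ruleset, and "r_last" asks for the last rule instead of the rule
 * numbered "rule_number".
 */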
static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

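/*
 * Drop every reference held by an Ethernet rule (tags, queue ID, kif and
 * table attachments) and free it.  Unlike pf_krule, the pf_keth_rule
 * counters are plain counter(9) allocations.
 */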
static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

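/*
 * Each tag is linked into two hash tables: namehash, keyed by a seeded
 * murmur3 hash of the tag name, and taghash, keyed by the tag value
 * itself.  This makes both name-to-tag and tag-to-name lookups O(1) on
 * average.
 */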
static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

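/*
 * Ethernet rulesets are updated transactionally: pf_begin_eth() empties
 * the inactive list and hands out a ticket, rules are then loaded into
 * the inactive list, and pf_commit_eth() swaps it with the active list.
 * A stale ticket aborts the commit with EBUSY.
 */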
static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

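/*
 * Skip steps let the evaluation loop jump over runs of consecutive rules
 * that share the same value for a given criterion (interface, direction,
 * protocol, addresses): when that criterion fails to match, every rule in
 * the run is skipped at once instead of being evaluated one by one.
 */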
#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

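/*
 * ALTQ configuration follows the same begin/rollback/commit model as
 * rulesets: pf_begin_altq() purges the inactive queue lists and opens a
 * ticket, and pf_commit_altq() swaps the inactive and active lists and
 * then attaches (or detaches) the disciplines on the interfaces.
 */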
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

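/*
 * Enable the queueing discipline on the interface named by the altq and
 * install a token bucket regulator sized from the interface bandwidth and
 * tbrsize; pf_disable_altq() undoes both.
 */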
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one; in that case just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

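/*
 * Filter rulesets are replaced under a ticket transaction driven from
 * userland.  A simplified sketch of the ioctl sequence (per the pfvar.h
 * definitions; error handling omitted):
 *
 *	struct pfioc_trans_e e = { .rs_num = PF_RULESET_FILTER };
 *	struct pfioc_trans t = { .size = 1, .esize = sizeof(e), .array = &e };
 *
 *	ioctl(dev, DIOCXBEGIN, &t);	// pf_begin_rules(): new ticket
 *	// ... DIOCADDRULE with ticket = e.ticket fills the inactive list ...
 *	ioctl(dev, DIOCXCOMMIT, &t);	// pf_commit_rules(): swap lists
 *
 * A ticket mismatch fails with EBUSY; DIOCXROLLBACK (pf_rollback_rules())
 * discards the inactive list instead.
 */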
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

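/*
 * The PF_MD5_UPD* macros feed rule fields into an MD5 context; multi-byte
 * fields are converted to network byte order first so that the resulting
 * ruleset checksum is independent of host endianness.
 */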
#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

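/*
 * Swap the inactive ruleset in as the new active one.  When keep_counters
 * is set, counters of rules that are unchanged (found in the old tree by
 * their MD5 digest) are carried over to the new rules.  Retired rules are
 * moved to V_pf_unlinked_rules for deferred cleanup.
 */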
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global	*old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_add_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_add_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_add_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_add_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_add_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}

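/*
 * Compute an MD5 digest over all inactive rulesets (scrub excluded) and
 * store it in V_pf_status.pf_chksum, so that pfsync peers can verify they
 * are running identical rulesets.  Also builds the per-ruleset ptr_array
 * used to index rules by number.
 */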
static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	/* Convert the absolute expiry time to seconds remaining. */
	out->expire = in->expire;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate = in->conn_rate;
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

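/*
 * Return the n-th altq entry counting across both active lists: the
 * per-interface list first, then the per-queue list.
 */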
static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

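/*
 * Conversion shims between the kernel-internal rule representation
 * (struct pf_krule) and the ABI structure exchanged with userland
 * (struct pf_rule).  Counter values are fetched, never copied by
 * reference.
 */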
static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

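/*
 * Validate and copy a userland rule into a kernel rule.  Address blocks
 * are checked with pf_check_rule_addr() and all strings go through
 * pf_user_strcpy(), which rejects unterminated input with EINVAL.
 */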
sizeof(rule->tagname)); 2024 if (ret != 0) 2025 return (ret); 2026 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, 2027 sizeof(rule->match_tagname)); 2028 if (ret != 0) 2029 return (ret); 2030 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, 2031 sizeof(rule->overload_tblname)); 2032 if (ret != 0) 2033 return (ret); 2034 2035 pf_pool_to_kpool(&rule->rpool, &krule->rpool); 2036 2037 /* Don't allow userspace to set evaluations, packets or bytes. */ 2038 /* kif, anchor, overload_tbl are not copied over. */ 2039 2040 krule->os_fingerprint = rule->os_fingerprint; 2041 2042 krule->rtableid = rule->rtableid; 2043 bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout)); 2044 krule->max_states = rule->max_states; 2045 krule->max_src_nodes = rule->max_src_nodes; 2046 krule->max_src_states = rule->max_src_states; 2047 krule->max_src_conn = rule->max_src_conn; 2048 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; 2049 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; 2050 krule->qid = rule->qid; 2051 krule->pqid = rule->pqid; 2052 krule->nr = rule->nr; 2053 krule->prob = rule->prob; 2054 krule->cuid = rule->cuid; 2055 krule->cpid = rule->cpid; 2056 2057 krule->return_icmp = rule->return_icmp; 2058 krule->return_icmp6 = rule->return_icmp6; 2059 krule->max_mss = rule->max_mss; 2060 krule->tag = rule->tag; 2061 krule->match_tag = rule->match_tag; 2062 krule->scrub_flags = rule->scrub_flags; 2063 2064 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); 2065 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); 2066 2067 krule->rule_flag = rule->rule_flag; 2068 krule->action = rule->action; 2069 krule->direction = rule->direction; 2070 krule->log = rule->log; 2071 krule->logif = rule->logif; 2072 krule->quick = rule->quick; 2073 krule->ifnot = rule->ifnot; 2074 krule->match_tag_not = rule->match_tag_not; 2075 krule->natpass = rule->natpass; 2076 2077 krule->keep_state = rule->keep_state; 2078 krule->af = rule->af; 2079 krule->proto = rule->proto; 2080 krule->type = rule->type; 2081 krule->code = rule->code; 2082 krule->flags = rule->flags; 2083 krule->flagset = rule->flagset; 2084 krule->min_ttl = rule->min_ttl; 2085 krule->allow_opts = rule->allow_opts; 2086 krule->rt = rule->rt; 2087 krule->return_ttl = rule->return_ttl; 2088 krule->tos = rule->tos; 2089 krule->set_tos = rule->set_tos; 2090 2091 krule->flush = rule->flush; 2092 krule->prio = rule->prio; 2093 krule->set_prio[0] = rule->set_prio[0]; 2094 krule->set_prio[1] = rule->set_prio[1]; 2095 2096 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); 2097 2098 return (0); 2099 } 2100 2101 static int 2102 pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk, 2103 struct pf_kstate_kill *kill) 2104 { 2105 int ret; 2106 2107 bzero(kill, sizeof(*kill)); 2108 2109 bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp)); 2110 kill->psk_af = psk->psk_af; 2111 kill->psk_proto = psk->psk_proto; 2112 bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src)); 2113 bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst)); 2114 ret = pf_user_strcpy(kill->psk_ifname, psk->psk_ifname, 2115 sizeof(kill->psk_ifname)); 2116 if (ret != 0) 2117 return (ret); 2118 ret = pf_user_strcpy(kill->psk_label, psk->psk_label, 2119 sizeof(kill->psk_label)); 2120 if (ret != 0) 2121 return (ret); 2122 2123 return (0); 2124 } 2125 2126 static int 2127 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket, 2128 uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2129 struct thread *td) 2130 { 2131 struct pf_kruleset *ruleset; 2132 struct pf_krule *tail; 2133 struct pf_kpooladdr *pa; 2134 struct pfi_kkif *kif = NULL; 2135 int rs_num; 2136 int error = 0; 2137 2138 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2139 error = EINVAL; 2140 goto errout_unlocked; 2141 } 2142 2143 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2144 2145 if (rule->ifname[0]) 2146 kif = pf_kkif_create(M_WAITOK); 2147 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2148 for (int i = 0; i < 2; i++) { 2149 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2150 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2151 } 2152 rule->states_cur = counter_u64_alloc(M_WAITOK); 2153 rule->states_tot = counter_u64_alloc(M_WAITOK); 2154 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2155 rule->cuid = td->td_ucred->cr_ruid; 2156 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2157 TAILQ_INIT(&rule->rpool.list); 2158 2159 PF_CONFIG_LOCK(); 2160 PF_RULES_WLOCK(); 2161 #ifdef PF_WANT_32_TO_64_COUNTER 2162 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2163 MPASS(!rule->allrulelinked); 2164 rule->allrulelinked = true; 2165 V_pf_allrulecount++; 2166 #endif 2167 ruleset = pf_find_kruleset(anchor); 2168 if (ruleset == NULL) 2169 ERROUT(EINVAL); 2170 rs_num = pf_get_ruleset_number(rule->action); 2171 if (rs_num >= PF_RULESET_MAX) 2172 ERROUT(EINVAL); 2173 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2174 DPFPRINTF(PF_DEBUG_MISC, 2175 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2176 ruleset->rules[rs_num].inactive.ticket)); 2177 ERROUT(EBUSY); 2178 } 2179 if (pool_ticket != V_ticket_pabuf) { 2180 DPFPRINTF(PF_DEBUG_MISC, 2181 ("pool_ticket: %d != %d\n", pool_ticket, 2182 V_ticket_pabuf)); 2183 ERROUT(EBUSY); 2184 } 2185 /* 2186 * XXXMJG hack: there is no mechanism to ensure they started the 2187 * transaction. Ticket checked above may happen to match by accident, 2188 * even if nobody called DIOCXBEGIN, let alone this process. 2189 * Partially work around it by checking if the RB tree got allocated, 2190 * see pf_begin_rules. 
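* The intended flow is: DIOCXBEGIN (via pf_begin_rules()) allocates
* the inactive tree and hands out a fresh ticket, DIOCADDRULE is
* issued once per rule with that ticket, and DIOCXCOMMIT (via
* pf_commit_rules()) makes the inactive ruleset active.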
2191 */ 2192 if (ruleset->rules[rs_num].inactive.tree == NULL) { 2193 ERROUT(EINVAL); 2194 } 2195 2196 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2197 pf_krulequeue); 2198 if (tail) 2199 rule->nr = tail->nr + 1; 2200 else 2201 rule->nr = 0; 2202 if (rule->ifname[0]) { 2203 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2204 kif = NULL; 2205 pfi_kkif_ref(rule->kif); 2206 } else 2207 rule->kif = NULL; 2208 2209 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2210 error = EBUSY; 2211 2212 #ifdef ALTQ 2213 /* set queue IDs */ 2214 if (rule->qname[0] != 0) { 2215 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2216 error = EBUSY; 2217 else if (rule->pqname[0] != 0) { 2218 if ((rule->pqid = 2219 pf_qname2qid(rule->pqname)) == 0) 2220 error = EBUSY; 2221 } else 2222 rule->pqid = rule->qid; 2223 } 2224 #endif 2225 if (rule->tagname[0]) 2226 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2227 error = EBUSY; 2228 if (rule->match_tagname[0]) 2229 if ((rule->match_tag = 2230 pf_tagname2tag(rule->match_tagname)) == 0) 2231 error = EBUSY; 2232 if (rule->rt && !rule->direction) 2233 error = EINVAL; 2234 if (!rule->log) 2235 rule->logif = 0; 2236 if (rule->logif >= PFLOGIFS_MAX) 2237 error = EINVAL; 2238 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2239 error = ENOMEM; 2240 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2241 error = ENOMEM; 2242 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2243 error = EINVAL; 2244 if (rule->scrub_flags & PFSTATE_SETPRIO && 2245 (rule->set_prio[0] > PF_PRIO_MAX || 2246 rule->set_prio[1] > PF_PRIO_MAX)) 2247 error = EINVAL; 2248 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2249 if (pa->addr.type == PF_ADDR_TABLE) { 2250 pa->addr.p.tbl = pfr_attach_table(ruleset, 2251 pa->addr.v.tblname); 2252 if (pa->addr.p.tbl == NULL) 2253 error = ENOMEM; 2254 } 2255 2256 rule->overload_tbl = NULL; 2257 if (rule->overload_tblname[0]) { 2258 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2259 rule->overload_tblname)) == NULL) 2260 error = EINVAL; 2261 else 2262 rule->overload_tbl->pfrkt_flags |= 2263 PFR_TFLAG_ACTIVE; 2264 } 2265 2266 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2267 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2268 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2269 (rule->rt > PF_NOPFROUTE)) && 2270 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2271 error = EINVAL; 2272 2273 if (error) { 2274 pf_free_rule(rule); 2275 rule = NULL; 2276 ERROUT(error); 2277 } 2278 2279 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2280 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2281 rule, entries); 2282 ruleset->rules[rs_num].inactive.rcount++; 2283 2284 PF_RULES_WUNLOCK(); 2285 pf_hash_rule(rule); 2286 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { 2287 PF_RULES_WLOCK(); 2288 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); 2289 ruleset->rules[rs_num].inactive.rcount--; 2290 pf_free_rule(rule); 2291 rule = NULL; 2292 ERROUT(EEXIST); 2293 } 2294 PF_CONFIG_UNLOCK(); 2295 2296 return (0); 2297 2298 #undef ERROUT 2299 errout: 2300 PF_RULES_WUNLOCK(); 2301 PF_CONFIG_UNLOCK(); 2302 errout_unlocked: 2303 pf_kkif_free(kif); 2304 pf_krule_free(rule); 2305 return (error); 2306 } 2307 2308 static bool 2309 pf_label_match(const struct pf_krule *rule, const char *label) 2310 { 2311 int i = 0; 2312 2313 while (*rule->label[i]) { 2314 if (strcmp(rule->label[i], label) == 0) 2315 return (true); 2316 i++; 2317 } 2318 2319 return (false); 2320 } 2321 2322 
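/*
 * Unlink the state matching 'key' in direction 'dir'.  Returns the
 * number of states killed: 1 on a unique match, 0 when nothing matched
 * or when pf_find_state_all() reported more than one candidate, in
 * which case no state is unlinked.
 */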
static unsigned int 2323 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2324 { 2325 struct pf_kstate *s; 2326 int more = 0; 2327 2328 s = pf_find_state_all(key, dir, &more); 2329 if (s == NULL) 2330 return (0); 2331 2332 if (more) { 2333 PF_STATE_UNLOCK(s); 2334 return (0); 2335 } 2336 2337 pf_unlink_state(s); 2338 return (1); 2339 } 2340 2341 static int 2342 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2343 { 2344 struct pf_kstate *s; 2345 struct pf_state_key *sk; 2346 struct pf_addr *srcaddr, *dstaddr; 2347 struct pf_state_key_cmp match_key; 2348 int idx, killed = 0; 2349 unsigned int dir; 2350 u_int16_t srcport, dstport; 2351 struct pfi_kkif *kif; 2352 2353 relock_DIOCKILLSTATES: 2354 PF_HASHROW_LOCK(ih); 2355 LIST_FOREACH(s, &ih->states, entry) { 2356 /* For floating states look at the original kif. */ 2357 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2358 2359 sk = s->key[PF_SK_WIRE]; 2360 if (s->direction == PF_OUT) { 2361 srcaddr = &sk->addr[1]; 2362 dstaddr = &sk->addr[0]; 2363 srcport = sk->port[1]; 2364 dstport = sk->port[0]; 2365 } else { 2366 srcaddr = &sk->addr[0]; 2367 dstaddr = &sk->addr[1]; 2368 srcport = sk->port[0]; 2369 dstport = sk->port[1]; 2370 } 2371 2372 if (psk->psk_af && sk->af != psk->psk_af) 2373 continue; 2374 2375 if (psk->psk_proto && psk->psk_proto != sk->proto) 2376 continue; 2377 2378 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2379 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2380 continue; 2381 2382 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2383 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2384 continue; 2385 2386 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2387 &psk->psk_rt_addr.addr.v.a.addr, 2388 &psk->psk_rt_addr.addr.v.a.mask, 2389 &s->rt_addr, sk->af)) 2390 continue; 2391 2392 if (psk->psk_src.port_op != 0 && 2393 ! pf_match_port(psk->psk_src.port_op, 2394 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2395 continue; 2396 2397 if (psk->psk_dst.port_op != 0 && 2398 ! pf_match_port(psk->psk_dst.port_op, 2399 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2400 continue; 2401 2402 if (psk->psk_label[0] && 2403 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2404 continue; 2405 2406 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2407 kif->pfik_name)) 2408 continue; 2409 2410 if (psk->psk_kill_match) { 2411 /* Create the key to find matching states, with lock 2412 * held. 
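* The match key mirrors the state's other key pair (stack side for
* outbound states, wire side for inbound ones) with the address and
* port slots transposed, so pf_kill_matching_state() finds the states
* belonging to the reverse direction of the same flow.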
*/ 2413 2414 bzero(&match_key, sizeof(match_key)); 2415 2416 if (s->direction == PF_OUT) { 2417 dir = PF_IN; 2418 idx = PF_SK_STACK; 2419 } else { 2420 dir = PF_OUT; 2421 idx = PF_SK_WIRE; 2422 } 2423 2424 match_key.af = s->key[idx]->af; 2425 match_key.proto = s->key[idx]->proto; 2426 PF_ACPY(&match_key.addr[0], 2427 &s->key[idx]->addr[1], match_key.af); 2428 match_key.port[0] = s->key[idx]->port[1]; 2429 PF_ACPY(&match_key.addr[1], 2430 &s->key[idx]->addr[0], match_key.af); 2431 match_key.port[1] = s->key[idx]->port[0]; 2432 } 2433 2434 pf_unlink_state(s); 2435 killed++; 2436 2437 if (psk->psk_kill_match) 2438 killed += pf_kill_matching_state(&match_key, dir); 2439 2440 goto relock_DIOCKILLSTATES; 2441 } 2442 PF_HASHROW_UNLOCK(ih); 2443 2444 return (killed); 2445 } 2446 2447 static int 2448 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2449 { 2450 int error = 0; 2451 PF_RULES_RLOCK_TRACKER; 2452 2453 #define ERROUT_IOCTL(target, x) \ 2454 do { \ 2455 error = (x); \ 2456 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2457 goto target; \ 2458 } while (0) 2459 2460 2461 /* XXX keep in sync with switch() below */ 2462 if (securelevel_gt(td->td_ucred, 2)) 2463 switch (cmd) { 2464 case DIOCGETRULES: 2465 case DIOCGETRULE: 2466 case DIOCGETRULENV: 2467 case DIOCGETADDRS: 2468 case DIOCGETADDR: 2469 case DIOCGETSTATE: 2470 case DIOCGETSTATENV: 2471 case DIOCSETSTATUSIF: 2472 case DIOCGETSTATUS: 2473 case DIOCGETSTATUSNV: 2474 case DIOCCLRSTATUS: 2475 case DIOCNATLOOK: 2476 case DIOCSETDEBUG: 2477 case DIOCGETSTATES: 2478 case DIOCGETSTATESV2: 2479 case DIOCGETTIMEOUT: 2480 case DIOCCLRRULECTRS: 2481 case DIOCGETLIMIT: 2482 case DIOCGETALTQSV0: 2483 case DIOCGETALTQSV1: 2484 case DIOCGETALTQV0: 2485 case DIOCGETALTQV1: 2486 case DIOCGETQSTATSV0: 2487 case DIOCGETQSTATSV1: 2488 case DIOCGETRULESETS: 2489 case DIOCGETRULESET: 2490 case DIOCRGETTABLES: 2491 case DIOCRGETTSTATS: 2492 case DIOCRCLRTSTATS: 2493 case DIOCRCLRADDRS: 2494 case DIOCRADDADDRS: 2495 case DIOCRDELADDRS: 2496 case DIOCRSETADDRS: 2497 case DIOCRGETADDRS: 2498 case DIOCRGETASTATS: 2499 case DIOCRCLRASTATS: 2500 case DIOCRTSTADDRS: 2501 case DIOCOSFPGET: 2502 case DIOCGETSRCNODES: 2503 case DIOCCLRSRCNODES: 2504 case DIOCGETSYNCOOKIES: 2505 case DIOCIGETIFACES: 2506 case DIOCGIFSPEEDV0: 2507 case DIOCGIFSPEEDV1: 2508 case DIOCSETIFFLAG: 2509 case DIOCCLRIFFLAG: 2510 case DIOCGETETHRULES: 2511 case DIOCGETETHRULE: 2512 case DIOCGETETHRULESETS: 2513 case DIOCGETETHRULESET: 2514 break; 2515 case DIOCRCLRTABLES: 2516 case DIOCRADDTABLES: 2517 case DIOCRDELTABLES: 2518 case DIOCRSETTFLAGS: 2519 if (((struct pfioc_table *)addr)->pfrio_flags & 2520 PFR_FLAG_DUMMY) 2521 break; /* dummy operation ok */ 2522 return (EPERM); 2523 default: 2524 return (EPERM); 2525 } 2526 2527 if (!(flags & FWRITE)) 2528 switch (cmd) { 2529 case DIOCGETRULES: 2530 case DIOCGETADDRS: 2531 case DIOCGETADDR: 2532 case DIOCGETSTATE: 2533 case DIOCGETSTATENV: 2534 case DIOCGETSTATUS: 2535 case DIOCGETSTATUSNV: 2536 case DIOCGETSTATES: 2537 case DIOCGETSTATESV2: 2538 case DIOCGETTIMEOUT: 2539 case DIOCGETLIMIT: 2540 case DIOCGETALTQSV0: 2541 case DIOCGETALTQSV1: 2542 case DIOCGETALTQV0: 2543 case DIOCGETALTQV1: 2544 case DIOCGETQSTATSV0: 2545 case DIOCGETQSTATSV1: 2546 case DIOCGETRULESETS: 2547 case DIOCGETRULESET: 2548 case DIOCNATLOOK: 2549 case DIOCRGETTABLES: 2550 case DIOCRGETTSTATS: 2551 case DIOCRGETADDRS: 2552 case DIOCRGETASTATS: 2553 case DIOCRTSTADDRS: 2554 case DIOCOSFPGET: 2555 case DIOCGETSRCNODES: 
2556 case DIOCGETSYNCOOKIES: 2557 case DIOCIGETIFACES: 2558 case DIOCGIFSPEEDV1: 2559 case DIOCGIFSPEEDV0: 2560 case DIOCGETRULENV: 2561 case DIOCGETETHRULES: 2562 case DIOCGETETHRULE: 2563 case DIOCGETETHRULESETS: 2564 case DIOCGETETHRULESET: 2565 break; 2566 case DIOCRCLRTABLES: 2567 case DIOCRADDTABLES: 2568 case DIOCRDELTABLES: 2569 case DIOCRCLRTSTATS: 2570 case DIOCRCLRADDRS: 2571 case DIOCRADDADDRS: 2572 case DIOCRDELADDRS: 2573 case DIOCRSETADDRS: 2574 case DIOCRSETTFLAGS: 2575 if (((struct pfioc_table *)addr)->pfrio_flags & 2576 PFR_FLAG_DUMMY) { 2577 flags |= FWRITE; /* need write lock for dummy */ 2578 break; /* dummy operation ok */ 2579 } 2580 return (EACCES); 2581 case DIOCGETRULE: 2582 if (((struct pfioc_rule *)addr)->action == 2583 PF_GET_CLR_CNTR) 2584 return (EACCES); 2585 break; 2586 default: 2587 return (EACCES); 2588 } 2589 2590 CURVNET_SET(TD_TO_VNET(td)); 2591 2592 switch (cmd) { 2593 case DIOCSTART: 2594 sx_xlock(&pf_ioctl_lock); 2595 if (V_pf_status.running) 2596 error = EEXIST; 2597 else { 2598 hook_pf(); 2599 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2600 hook_pf_eth(); 2601 V_pf_status.running = 1; 2602 V_pf_status.since = time_second; 2603 new_unrhdr64(&V_pf_stateid, time_second); 2604 2605 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2606 } 2607 break; 2608 2609 case DIOCSTOP: 2610 sx_xlock(&pf_ioctl_lock); 2611 if (!V_pf_status.running) 2612 error = ENOENT; 2613 else { 2614 V_pf_status.running = 0; 2615 dehook_pf(); 2616 dehook_pf_eth(); 2617 V_pf_status.since = time_second; 2618 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2619 } 2620 break; 2621 2622 case DIOCGETETHRULES: { 2623 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2624 nvlist_t *nvl; 2625 void *packed; 2626 struct pf_keth_rule *tail; 2627 struct pf_keth_ruleset *rs; 2628 u_int32_t ticket, nr; 2629 const char *anchor = ""; 2630 2631 nvl = NULL; 2632 packed = NULL; 2633 2634 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x) 2635 2636 if (nv->len > pf_ioctl_maxcount) 2637 ERROUT(ENOMEM); 2638 2639 /* Copy the request in */ 2640 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2641 if (packed == NULL) 2642 ERROUT(ENOMEM); 2643 2644 error = copyin(nv->data, packed, nv->len); 2645 if (error) 2646 ERROUT(error); 2647 2648 nvl = nvlist_unpack(packed, nv->len, 0); 2649 if (nvl == NULL) 2650 ERROUT(EBADMSG); 2651 2652 if (! 
nvlist_exists_string(nvl, "anchor")) 2653 ERROUT(EBADMSG); 2654 2655 anchor = nvlist_get_string(nvl, "anchor"); 2656 2657 rs = pf_find_keth_ruleset(anchor); 2658 2659 nvlist_destroy(nvl); 2660 nvl = NULL; 2661 free(packed, M_NVLIST); 2662 packed = NULL; 2663 2664 if (rs == NULL) 2665 ERROUT(ENOENT); 2666 2667 /* Reply */ 2668 nvl = nvlist_create(0); 2669 if (nvl == NULL) 2670 ERROUT(ENOMEM); 2671 2672 PF_RULES_RLOCK(); 2673 2674 ticket = rs->active.ticket; 2675 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); 2676 if (tail) 2677 nr = tail->nr + 1; 2678 else 2679 nr = 0; 2680 2681 PF_RULES_RUNLOCK(); 2682 2683 nvlist_add_number(nvl, "ticket", ticket); 2684 nvlist_add_number(nvl, "nr", nr); 2685 2686 packed = nvlist_pack(nvl, &nv->len); 2687 if (packed == NULL) 2688 ERROUT(ENOMEM); 2689 2690 if (nv->size == 0) 2691 ERROUT(0); 2692 else if (nv->size < nv->len) 2693 ERROUT(ENOSPC); 2694 2695 error = copyout(packed, nv->data, nv->len); 2696 2697 #undef ERROUT 2698 DIOCGETETHRULES_error: 2699 free(packed, M_NVLIST); 2700 nvlist_destroy(nvl); 2701 break; 2702 } 2703 2704 case DIOCGETETHRULE: { 2705 struct epoch_tracker et; 2706 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2707 nvlist_t *nvl = NULL; 2708 void *nvlpacked = NULL; 2709 struct pf_keth_rule *rule = NULL; 2710 struct pf_keth_ruleset *rs; 2711 u_int32_t ticket, nr; 2712 bool clear = false; 2713 const char *anchor; 2714 2715 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x) 2716 2717 if (nv->len > pf_ioctl_maxcount) 2718 ERROUT(ENOMEM); 2719 2720 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2721 if (nvlpacked == NULL) 2722 ERROUT(ENOMEM); 2723 2724 error = copyin(nv->data, nvlpacked, nv->len); 2725 if (error) 2726 ERROUT(error); 2727 2728 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2729 if (nvl == NULL) 2730 ERROUT(EBADMSG); 2731 if (! nvlist_exists_number(nvl, "ticket")) 2732 ERROUT(EBADMSG); 2733 ticket = nvlist_get_number(nvl, "ticket"); 2734 if (! nvlist_exists_string(nvl, "anchor")) 2735 ERROUT(EBADMSG); 2736 anchor = nvlist_get_string(nvl, "anchor"); 2737 2738 if (nvlist_exists_bool(nvl, "clear")) 2739 clear = nvlist_get_bool(nvl, "clear"); 2740 2741 if (clear && !(flags & FWRITE)) 2742 ERROUT(EACCES); 2743 2744 if (! nvlist_exists_number(nvl, "nr")) 2745 ERROUT(EBADMSG); 2746 nr = nvlist_get_number(nvl, "nr"); 2747 2748 PF_RULES_RLOCK(); 2749 rs = pf_find_keth_ruleset(anchor); 2750 if (rs == NULL) { 2751 PF_RULES_RUNLOCK(); 2752 ERROUT(ENOENT); 2753 } 2754 if (ticket != rs->active.ticket) { 2755 PF_RULES_RUNLOCK(); 2756 ERROUT(EBUSY); 2757 } 2758 2759 nvlist_destroy(nvl); 2760 nvl = NULL; 2761 free(nvlpacked, M_NVLIST); 2762 nvlpacked = NULL; 2763 2764 rule = TAILQ_FIRST(rs->active.rules); 2765 while ((rule != NULL) && (rule->nr != nr)) 2766 rule = TAILQ_NEXT(rule, entries); 2767 if (rule == NULL) { 2768 PF_RULES_RUNLOCK(); 2769 ERROUT(ENOENT); 2770 } 2771 /* Make sure rule can't go away. 
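* We drop the rules read lock before the rule is exported; entering
* the net epoch first ensures the rule memory cannot be freed while
* it is still being referenced.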
*/ 2772 NET_EPOCH_ENTER(et); 2773 PF_RULES_RUNLOCK(); 2774 nvl = pf_keth_rule_to_nveth_rule(rule); 2775 if (nvl == NULL) { 2776 NET_EPOCH_EXIT(et); 2777 ERROUT(ENOMEM); 2778 } 2779 if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) { NET_EPOCH_EXIT(et); ERROUT(EBUSY); } NET_EPOCH_EXIT(et); 2780 2781 nvlpacked = nvlist_pack(nvl, &nv->len); 2782 if (nvlpacked == NULL) 2783 ERROUT(ENOMEM); 2784 2785 if (nv->size == 0) 2786 ERROUT(0); 2787 else if (nv->size < nv->len) 2788 ERROUT(ENOSPC); 2789 2790 error = copyout(nvlpacked, nv->data, nv->len); 2791 if (error == 0 && clear) { 2792 counter_u64_zero(rule->evaluations); 2793 for (int i = 0; i < 2; i++) { 2794 counter_u64_zero(rule->packets[i]); 2795 counter_u64_zero(rule->bytes[i]); 2796 } 2797 } 2798 2799 #undef ERROUT 2800 DIOCGETETHRULE_error: 2801 free(nvlpacked, M_NVLIST); 2802 nvlist_destroy(nvl); 2803 break; 2804 } 2805 2806 case DIOCADDETHRULE: { 2807 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2808 nvlist_t *nvl = NULL; 2809 void *nvlpacked = NULL; 2810 struct pf_keth_rule *rule = NULL, *tail = NULL; 2811 struct pf_keth_ruleset *ruleset = NULL; 2812 struct pfi_kkif *kif = NULL; 2813 const char *anchor = "", *anchor_call = ""; 2814 2815 #define ERROUT(x) ERROUT_IOCTL(DIOCADDETHRULE_error, x) 2816 2817 if (nv->len > pf_ioctl_maxcount) 2818 ERROUT(ENOMEM); 2819 2820 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2821 if (nvlpacked == NULL) 2822 ERROUT(ENOMEM); 2823 2824 error = copyin(nv->data, nvlpacked, nv->len); 2825 if (error) 2826 ERROUT(error); 2827 2828 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2829 if (nvl == NULL) 2830 ERROUT(EBADMSG); 2831 2832 if (! nvlist_exists_number(nvl, "ticket")) 2833 ERROUT(EBADMSG); 2834 2835 if (nvlist_exists_string(nvl, "anchor")) 2836 anchor = nvlist_get_string(nvl, "anchor"); 2837 if (nvlist_exists_string(nvl, "anchor_call")) 2838 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2839 2840 ruleset = pf_find_keth_ruleset(anchor); 2841 if (ruleset == NULL) 2842 ERROUT(EINVAL); 2843 2844 if (nvlist_get_number(nvl, "ticket") != 2845 ruleset->inactive.ticket) { 2846 DPFPRINTF(PF_DEBUG_MISC, 2847 ("ticket: %d != %d\n", 2848 (u_int32_t)nvlist_get_number(nvl, "ticket"), 2849 ruleset->inactive.ticket)); 2850 ERROUT(EBUSY); 2851 } 2852 2853 rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK); 2854 if (rule == NULL) 2855 ERROUT(ENOMEM); 2856 rule->timestamp = NULL; 2857 2858 error = pf_nveth_rule_to_keth_rule(nvl, rule); 2859 if (error != 0) 2860 ERROUT(error); 2861 2862 if (rule->ifname[0]) 2863 kif = pf_kkif_create(M_WAITOK); 2864 rule->evaluations = counter_u64_alloc(M_WAITOK); 2865 for (int i = 0; i < 2; i++) { 2866 rule->packets[i] = counter_u64_alloc(M_WAITOK); 2867 rule->bytes[i] = counter_u64_alloc(M_WAITOK); 2868 } 2869 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, 2870 M_WAITOK | M_ZERO); 2871 2872 PF_RULES_WLOCK(); 2873 2874 if (rule->ifname[0]) { 2875 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2876 pfi_kkif_ref(rule->kif); 2877 } else 2878 rule->kif = NULL; 2879 2880 #ifdef ALTQ 2881 /* set queue IDs; eth rules have no pqname counterpart */ 2882 if (rule->qname[0] != 0) { 2883 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2884 error = EBUSY; 2887 } 2888 #endif 2889 if (rule->tagname[0]) 2890 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2891 error = EBUSY; 2892 if (rule->match_tagname[0]) 2893 if ((rule->match_tag = pf_tagname2tag( 2894 rule->match_tagname)) == 0) 2895 error = EBUSY; 2896 2897 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) 2898 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); 2899 if
(error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) 2900 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); 2901 2902 if (error) { 2903 pf_free_eth_rule(rule); 2904 PF_RULES_WUNLOCK(); 2905 ERROUT(error); 2906 } 2907 2908 if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) { 2909 pf_free_eth_rule(rule); 2910 PF_RULES_WUNLOCK(); 2911 ERROUT(EINVAL); 2912 } 2913 2914 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); 2915 if (tail) 2916 rule->nr = tail->nr + 1; 2917 else 2918 rule->nr = 0; 2919 2920 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2921 2922 PF_RULES_WUNLOCK(); 2923 2924 #undef ERROUT 2925 DIOCADDETHRULE_error: 2926 nvlist_destroy(nvl); 2927 free(nvlpacked, M_NVLIST); 2928 break; 2929 } 2930 2931 case DIOCGETETHRULESETS: { 2932 struct epoch_tracker et; 2933 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2934 nvlist_t *nvl = NULL; 2935 void *nvlpacked = NULL; 2936 struct pf_keth_ruleset *ruleset; 2937 struct pf_keth_anchor *anchor; 2938 int nr = 0; 2939 2940 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESETS_error, x) 2941 2942 if (nv->len > pf_ioctl_maxcount) 2943 ERROUT(ENOMEM); 2944 2945 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 2946 if (nvlpacked == NULL) 2947 ERROUT(ENOMEM); 2948 2949 error = copyin(nv->data, nvlpacked, nv->len); 2950 if (error) 2951 ERROUT(error); 2952 2953 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2954 if (nvl == NULL) 2955 ERROUT(EBADMSG); 2956 if (! nvlist_exists_string(nvl, "path")) 2957 ERROUT(EBADMSG); 2958 2959 NET_EPOCH_ENTER(et); 2960 2961 if ((ruleset = pf_find_keth_ruleset( 2962 nvlist_get_string(nvl, "path"))) == NULL) { 2963 NET_EPOCH_EXIT(et); 2964 ERROUT(ENOENT); 2965 } 2966 2967 if (ruleset->anchor == NULL) { 2968 RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors) 2969 if (anchor->parent == NULL) 2970 nr++; 2971 } else { 2972 RB_FOREACH(anchor, pf_keth_anchor_node, 2973 &ruleset->anchor->children) 2974 nr++; 2975 } 2976 2977 NET_EPOCH_EXIT(et); 2978 2979 nvlist_destroy(nvl); 2980 nvl = NULL; 2981 free(nvlpacked, M_NVLIST); 2982 nvlpacked = NULL; 2983 2984 nvl = nvlist_create(0); 2985 if (nvl == NULL) 2986 ERROUT(ENOMEM); 2987 2988 nvlist_add_number(nvl, "nr", nr); 2989 2990 nvlpacked = nvlist_pack(nvl, &nv->len); 2991 if (nvlpacked == NULL) 2992 ERROUT(ENOMEM); 2993 2994 if (nv->size == 0) 2995 ERROUT(0); 2996 else if (nv->size < nv->len) 2997 ERROUT(ENOSPC); 2998 2999 error = copyout(nvlpacked, nv->data, nv->len); 3000 3001 #undef ERROUT 3002 DIOCGETETHRULESETS_error: 3003 free(nvlpacked, M_NVLIST); 3004 nvlist_destroy(nvl); 3005 break; 3006 } 3007 3008 case DIOCGETETHRULESET: { 3009 struct epoch_tracker et; 3010 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3011 nvlist_t *nvl = NULL; 3012 void *nvlpacked = NULL; 3013 struct pf_keth_ruleset *ruleset; 3014 struct pf_keth_anchor *anchor; 3015 int nr = 0, req_nr = 0; 3016 bool found = false; 3017 3018 #define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULESET_error, x) 3019 3020 if (nv->len > pf_ioctl_maxcount) 3021 ERROUT(ENOMEM); 3022 3023 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3024 if (nvlpacked == NULL) 3025 ERROUT(ENOMEM); 3026 3027 error = copyin(nv->data, nvlpacked, nv->len); 3028 if (error) 3029 ERROUT(error); 3030 3031 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3032 if (nvl == NULL) 3033 ERROUT(EBADMSG); 3034 if (! nvlist_exists_string(nvl, "path")) 3035 ERROUT(EBADMSG); 3036 if (! 
nvlist_exists_number(nvl, "nr")) 3037 ERROUT(EBADMSG); 3038 3039 req_nr = nvlist_get_number(nvl, "nr"); 3040 3041 NET_EPOCH_ENTER(et); 3042 3043 if ((ruleset = pf_find_keth_ruleset( 3044 nvlist_get_string(nvl, "path"))) == NULL) { 3045 NET_EPOCH_EXIT(et); 3046 ERROUT(ENOENT); 3047 } 3048 3049 nvlist_destroy(nvl); 3050 nvl = NULL; 3051 free(nvlpacked, M_NVLIST); 3052 nvlpacked = NULL; 3053 3054 nvl = nvlist_create(0); 3055 if (nvl == NULL) { 3056 NET_EPOCH_EXIT(et); 3057 ERROUT(ENOMEM); 3058 } 3059 3060 if (ruleset->anchor == NULL) { 3061 RB_FOREACH(anchor, pf_keth_anchor_global, 3062 &V_pf_keth_anchors) { 3063 if (anchor->parent == NULL && nr++ == req_nr) { 3064 found = true; 3065 break; 3066 } 3067 } 3068 } else { 3069 RB_FOREACH(anchor, pf_keth_anchor_node, 3070 &ruleset->anchor->children) { 3071 if (nr++ == req_nr) { 3072 found = true; 3073 break; 3074 } 3075 } 3076 } 3077 3078 NET_EPOCH_EXIT(et); 3079 if (found) { 3080 nvlist_add_number(nvl, "nr", nr); 3081 nvlist_add_string(nvl, "name", anchor->name); 3082 if (ruleset->anchor) 3083 nvlist_add_string(nvl, "path", 3084 ruleset->anchor->path); 3085 else 3086 nvlist_add_string(nvl, "path", ""); 3087 } else { 3088 ERROUT(EBUSY); 3089 } 3090 3091 nvlpacked = nvlist_pack(nvl, &nv->len); 3092 if (nvlpacked == NULL) 3093 ERROUT(ENOMEM); 3094 3095 if (nv->size == 0) 3096 ERROUT(0); 3097 else if (nv->size < nv->len) 3098 ERROUT(ENOSPC); 3099 3100 error = copyout(nvlpacked, nv->data, nv->len); 3101 3102 #undef ERROUT 3103 DIOCGETETHRULESET_error: 3104 free(nvlpacked, M_NVLIST); 3105 nvlist_destroy(nvl); 3106 break; 3107 } 3108 3109 case DIOCADDRULENV: { 3110 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3111 nvlist_t *nvl = NULL; 3112 void *nvlpacked = NULL; 3113 struct pf_krule *rule = NULL; 3114 const char *anchor = "", *anchor_call = ""; 3115 uint32_t ticket = 0, pool_ticket = 0; 3116 3117 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 3118 3119 if (nv->len > pf_ioctl_maxcount) 3120 ERROUT(ENOMEM); 3121 3122 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3123 error = copyin(nv->data, nvlpacked, nv->len); 3124 if (error) 3125 ERROUT(error); 3126 3127 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3128 if (nvl == NULL) 3129 ERROUT(EBADMSG); 3130 3131 if (! nvlist_exists_number(nvl, "ticket")) 3132 ERROUT(EINVAL); 3133 ticket = nvlist_get_number(nvl, "ticket"); 3134 3135 if (! nvlist_exists_number(nvl, "pool_ticket")) 3136 ERROUT(EINVAL); 3137 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 3138 3139 if (! 
nvlist_exists_nvlist(nvl, "rule")) 3140 ERROUT(EINVAL); 3141 3142 rule = pf_krule_alloc(); 3143 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 3144 rule); 3145 if (error) 3146 ERROUT(error); 3147 3148 if (nvlist_exists_string(nvl, "anchor")) 3149 anchor = nvlist_get_string(nvl, "anchor"); 3150 if (nvlist_exists_string(nvl, "anchor_call")) 3151 anchor_call = nvlist_get_string(nvl, "anchor_call"); 3152 3153 if ((error = nvlist_error(nvl))) 3154 ERROUT(error); 3155 3156 /* Frees rule on error */ 3157 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 3158 anchor_call, td); 3159 3160 nvlist_destroy(nvl); 3161 free(nvlpacked, M_NVLIST); 3162 break; 3163 #undef ERROUT 3164 DIOCADDRULENV_error: 3165 pf_krule_free(rule); 3166 nvlist_destroy(nvl); 3167 free(nvlpacked, M_NVLIST); 3168 3169 break; 3170 } 3171 case DIOCADDRULE: { 3172 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3173 struct pf_krule *rule; 3174 3175 rule = pf_krule_alloc(); 3176 error = pf_rule_to_krule(&pr->rule, rule); 3177 if (error != 0) { 3178 pf_krule_free(rule); 3179 break; 3180 } 3181 3182 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3183 3184 /* Frees rule on error */ 3185 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 3186 pr->anchor, pr->anchor_call, td); 3187 break; 3188 } 3189 3190 case DIOCGETRULES: { 3191 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3192 struct pf_kruleset *ruleset; 3193 struct pf_krule *tail; 3194 int rs_num; 3195 3196 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3197 3198 PF_RULES_WLOCK(); 3199 ruleset = pf_find_kruleset(pr->anchor); 3200 if (ruleset == NULL) { 3201 PF_RULES_WUNLOCK(); 3202 error = EINVAL; 3203 break; 3204 } 3205 rs_num = pf_get_ruleset_number(pr->rule.action); 3206 if (rs_num >= PF_RULESET_MAX) { 3207 PF_RULES_WUNLOCK(); 3208 error = EINVAL; 3209 break; 3210 } 3211 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 3212 pf_krulequeue); 3213 if (tail) 3214 pr->nr = tail->nr + 1; 3215 else 3216 pr->nr = 0; 3217 pr->ticket = ruleset->rules[rs_num].active.ticket; 3218 PF_RULES_WUNLOCK(); 3219 break; 3220 } 3221 3222 case DIOCGETRULE: { 3223 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 3224 struct pf_kruleset *ruleset; 3225 struct pf_krule *rule; 3226 int rs_num; 3227 3228 pr->anchor[sizeof(pr->anchor) - 1] = 0; 3229 3230 PF_RULES_WLOCK(); 3231 ruleset = pf_find_kruleset(pr->anchor); 3232 if (ruleset == NULL) { 3233 PF_RULES_WUNLOCK(); 3234 error = EINVAL; 3235 break; 3236 } 3237 rs_num = pf_get_ruleset_number(pr->rule.action); 3238 if (rs_num >= PF_RULESET_MAX) { 3239 PF_RULES_WUNLOCK(); 3240 error = EINVAL; 3241 break; 3242 } 3243 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 3244 PF_RULES_WUNLOCK(); 3245 error = EBUSY; 3246 break; 3247 } 3248 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3249 while ((rule != NULL) && (rule->nr != pr->nr)) 3250 rule = TAILQ_NEXT(rule, entries); 3251 if (rule == NULL) { 3252 PF_RULES_WUNLOCK(); 3253 error = EBUSY; 3254 break; 3255 } 3256 3257 pf_krule_to_rule(rule, &pr->rule); 3258 3259 if (pf_kanchor_copyout(ruleset, rule, pr)) { 3260 PF_RULES_WUNLOCK(); 3261 error = EBUSY; 3262 break; 3263 } 3264 pf_addr_copyout(&pr->rule.src.addr); 3265 pf_addr_copyout(&pr->rule.dst.addr); 3266 3267 if (pr->action == PF_GET_CLR_CNTR) { 3268 pf_counter_u64_zero(&rule->evaluations); 3269 for (int i = 0; i < 2; i++) { 3270 pf_counter_u64_zero(&rule->packets[i]); 3271 pf_counter_u64_zero(&rule->bytes[i]); 3272 } 3273 counter_u64_zero(rule->states_tot); 3274 } 3275 PF_RULES_WUNLOCK(); 3276 break; 3277 } 3278 
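/*
 * Nvlist variant of DIOCGETRULE.  The packed request is expected to
 * carry "anchor" (string), "ruleset" (number, the rule action as
 * understood by pf_get_ruleset_number()), "ticket" (number), "nr"
 * (number) and optionally "clear_counter" (bool); the reply carries
 * "nr" plus the exported rule as a nested "rule" nvlist.
 */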
3279 case DIOCGETRULENV: { 3280 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 3281 nvlist_t *nvrule = NULL; 3282 nvlist_t *nvl = NULL; 3283 struct pf_kruleset *ruleset; 3284 struct pf_krule *rule; 3285 void *nvlpacked = NULL; 3286 int rs_num, nr; 3287 bool clear_counter = false; 3288 3289 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 3290 3291 if (nv->len > pf_ioctl_maxcount) 3292 ERROUT(ENOMEM); 3293 3294 /* Copy the request in */ 3295 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3296 if (nvlpacked == NULL) 3297 ERROUT(ENOMEM); 3298 3299 error = copyin(nv->data, nvlpacked, nv->len); 3300 if (error) 3301 ERROUT(error); 3302 3303 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3304 if (nvl == NULL) 3305 ERROUT(EBADMSG); 3306 3307 if (! nvlist_exists_string(nvl, "anchor")) 3308 ERROUT(EBADMSG); 3309 if (! nvlist_exists_number(nvl, "ruleset")) 3310 ERROUT(EBADMSG); 3311 if (! nvlist_exists_number(nvl, "ticket")) 3312 ERROUT(EBADMSG); 3313 if (! nvlist_exists_number(nvl, "nr")) 3314 ERROUT(EBADMSG); 3315 3316 if (nvlist_exists_bool(nvl, "clear_counter")) 3317 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3318 3319 if (clear_counter && !(flags & FWRITE)) 3320 ERROUT(EACCES); 3321 3322 nr = nvlist_get_number(nvl, "nr"); 3323 3324 PF_RULES_WLOCK(); 3325 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3326 if (ruleset == NULL) { 3327 PF_RULES_WUNLOCK(); 3328 ERROUT(ENOENT); 3329 } 3330 3331 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3332 if (rs_num >= PF_RULESET_MAX) { 3333 PF_RULES_WUNLOCK(); 3334 ERROUT(EINVAL); 3335 } 3336 3337 if (nvlist_get_number(nvl, "ticket") != 3338 ruleset->rules[rs_num].active.ticket) { 3339 PF_RULES_WUNLOCK(); 3340 ERROUT(EBUSY); 3341 } 3342 3343 if ((error = nvlist_error(nvl))) { 3344 PF_RULES_WUNLOCK(); 3345 ERROUT(error); 3346 } 3347 3348 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3349 while ((rule != NULL) && (rule->nr != nr)) 3350 rule = TAILQ_NEXT(rule, entries); 3351 if (rule == NULL) { 3352 PF_RULES_WUNLOCK(); 3353 ERROUT(EBUSY); 3354 } 3355 3356 nvrule = pf_krule_to_nvrule(rule); 3357 3358 nvlist_destroy(nvl); 3359 nvl = nvlist_create(0); 3360 if (nvl == NULL) { 3361 PF_RULES_WUNLOCK(); 3362 ERROUT(ENOMEM); 3363 } 3364 nvlist_add_number(nvl, "nr", nr); 3365 nvlist_add_nvlist(nvl, "rule", nvrule); 3366 nvlist_destroy(nvrule); 3367 nvrule = NULL; 3368 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3369 PF_RULES_WUNLOCK(); 3370 ERROUT(EBUSY); 3371 } 3372 3373 free(nvlpacked, M_NVLIST); 3374 nvlpacked = nvlist_pack(nvl, &nv->len); 3375 if (nvlpacked == NULL) { 3376 PF_RULES_WUNLOCK(); 3377 ERROUT(ENOMEM); 3378 } 3379 3380 if (nv->size == 0) { 3381 PF_RULES_WUNLOCK(); 3382 ERROUT(0); 3383 } 3384 else if (nv->size < nv->len) { 3385 PF_RULES_WUNLOCK(); 3386 ERROUT(ENOSPC); 3387 } 3388 3389 if (clear_counter) { 3390 pf_counter_u64_zero(&rule->evaluations); 3391 for (int i = 0; i < 2; i++) { 3392 pf_counter_u64_zero(&rule->packets[i]); 3393 pf_counter_u64_zero(&rule->bytes[i]); 3394 } 3395 counter_u64_zero(rule->states_tot); 3396 } 3397 PF_RULES_WUNLOCK(); 3398 3399 error = copyout(nvlpacked, nv->data, nv->len); 3400 3401 #undef ERROUT 3402 DIOCGETRULENV_error: 3403 free(nvlpacked, M_NVLIST); 3404 nvlist_destroy(nvrule); 3405 nvlist_destroy(nvl); 3406 3407 break; 3408 } 3409 3410 case DIOCCHANGERULE: { 3411 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3412 struct pf_kruleset *ruleset; 3413 struct pf_krule *oldrule = NULL, *newrule = NULL; 3414 struct pfi_kkif *kif = NULL; 3415 struct 
pf_kpooladdr *pa; 3416 u_int32_t nr = 0; 3417 int rs_num; 3418 3419 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3420 3421 if (pcr->action < PF_CHANGE_ADD_HEAD || 3422 pcr->action > PF_CHANGE_GET_TICKET) { 3423 error = EINVAL; 3424 break; 3425 } 3426 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3427 error = EINVAL; 3428 break; 3429 } 3430 3431 if (pcr->action != PF_CHANGE_REMOVE) { 3432 newrule = pf_krule_alloc(); 3433 error = pf_rule_to_krule(&pcr->rule, newrule); 3434 if (error != 0) { 3435 pf_krule_free(newrule); 3436 break; 3437 } 3438 3439 if (newrule->ifname[0]) 3440 kif = pf_kkif_create(M_WAITOK); 3441 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3442 for (int i = 0; i < 2; i++) { 3443 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3444 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3445 } 3446 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3447 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3448 newrule->src_nodes = counter_u64_alloc(M_WAITOK); 3449 newrule->cuid = td->td_ucred->cr_ruid; 3450 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3451 TAILQ_INIT(&newrule->rpool.list); 3452 } 3453 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGERULE_error, x) 3454 3455 PF_CONFIG_LOCK(); 3456 PF_RULES_WLOCK(); 3457 #ifdef PF_WANT_32_TO_64_COUNTER 3458 if (newrule != NULL) { 3459 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3460 newrule->allrulelinked = true; 3461 V_pf_allrulecount++; 3462 } 3463 #endif 3464 3465 if (!(pcr->action == PF_CHANGE_REMOVE || 3466 pcr->action == PF_CHANGE_GET_TICKET) && 3467 pcr->pool_ticket != V_ticket_pabuf) 3468 ERROUT(EBUSY); 3469 3470 ruleset = pf_find_kruleset(pcr->anchor); 3471 if (ruleset == NULL) 3472 ERROUT(EINVAL); 3473 3474 rs_num = pf_get_ruleset_number(pcr->rule.action); 3475 if (rs_num >= PF_RULESET_MAX) 3476 ERROUT(EINVAL); 3477 3478 /* 3479 * XXXMJG: there is no guarantee that the ruleset was 3480 * created by the usual route of calling DIOCXBEGIN. 3481 * As a result it is possible the rule tree will not 3482 * be allocated yet. Hack around it by doing it here. 3483 * Note it is fine to let the tree persist in case of 3484 * error as it will be freed down the road on future 3485 * updates (if need be). 
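* Contrast with pf_ioctl_addrule(), which rejects the request with
* EINVAL when the inactive tree is missing.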
3486 */ 3487 if (ruleset->rules[rs_num].active.tree == NULL) { 3488 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); 3489 if (ruleset->rules[rs_num].active.tree == NULL) { 3490 ERROUT(ENOMEM); 3491 } 3492 } 3493 3494 if (pcr->action == PF_CHANGE_GET_TICKET) { 3495 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3496 ERROUT(0); 3497 } else if (pcr->ticket != 3498 ruleset->rules[rs_num].active.ticket) 3499 ERROUT(EINVAL); 3500 3501 if (pcr->action != PF_CHANGE_REMOVE) { 3502 if (newrule->ifname[0]) { 3503 newrule->kif = pfi_kkif_attach(kif, 3504 newrule->ifname); 3505 kif = NULL; 3506 pfi_kkif_ref(newrule->kif); 3507 } else 3508 newrule->kif = NULL; 3509 3510 if (newrule->rtableid > 0 && 3511 newrule->rtableid >= rt_numfibs) 3512 error = EBUSY; 3513 3514 #ifdef ALTQ 3515 /* set queue IDs */ 3516 if (newrule->qname[0] != 0) { 3517 if ((newrule->qid = 3518 pf_qname2qid(newrule->qname)) == 0) 3519 error = EBUSY; 3520 else if (newrule->pqname[0] != 0) { 3521 if ((newrule->pqid = 3522 pf_qname2qid(newrule->pqname)) == 0) 3523 error = EBUSY; 3524 } else 3525 newrule->pqid = newrule->qid; 3526 } 3527 #endif /* ALTQ */ 3528 if (newrule->tagname[0]) 3529 if ((newrule->tag = 3530 pf_tagname2tag(newrule->tagname)) == 0) 3531 error = EBUSY; 3532 if (newrule->match_tagname[0]) 3533 if ((newrule->match_tag = pf_tagname2tag( 3534 newrule->match_tagname)) == 0) 3535 error = EBUSY; 3536 if (newrule->rt && !newrule->direction) 3537 error = EINVAL; 3538 if (!newrule->log) 3539 newrule->logif = 0; 3540 if (newrule->logif >= PFLOGIFS_MAX) 3541 error = EINVAL; 3542 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3543 error = ENOMEM; 3544 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3545 error = ENOMEM; 3546 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3547 error = EINVAL; 3548 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3549 if (pa->addr.type == PF_ADDR_TABLE) { 3550 pa->addr.p.tbl = 3551 pfr_attach_table(ruleset, 3552 pa->addr.v.tblname); 3553 if (pa->addr.p.tbl == NULL) 3554 error = ENOMEM; 3555 } 3556 3557 newrule->overload_tbl = NULL; 3558 if (newrule->overload_tblname[0]) { 3559 if ((newrule->overload_tbl = pfr_attach_table( 3560 ruleset, newrule->overload_tblname)) == 3561 NULL) 3562 error = EINVAL; 3563 else 3564 newrule->overload_tbl->pfrkt_flags |= 3565 PFR_TFLAG_ACTIVE; 3566 } 3567 3568 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3569 if (((((newrule->action == PF_NAT) || 3570 (newrule->action == PF_RDR) || 3571 (newrule->action == PF_BINAT) || 3572 (newrule->rt > PF_NOPFROUTE)) && 3573 !newrule->anchor)) && 3574 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3575 error = EINVAL; 3576 3577 if (error) { 3578 pf_free_rule(newrule); 3579 PF_RULES_WUNLOCK(); 3580 PF_CONFIG_UNLOCK(); 3581 break; 3582 } 3583 3584 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3585 } 3586 pf_empty_kpool(&V_pf_pabuf); 3587 3588 if (pcr->action == PF_CHANGE_ADD_HEAD) 3589 oldrule = TAILQ_FIRST( 3590 ruleset->rules[rs_num].active.ptr); 3591 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3592 oldrule = TAILQ_LAST( 3593 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3594 else { 3595 oldrule = TAILQ_FIRST( 3596 ruleset->rules[rs_num].active.ptr); 3597 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3598 oldrule = TAILQ_NEXT(oldrule, entries); 3599 if (oldrule == NULL) { 3600 if (newrule != NULL) 3601 pf_free_rule(newrule); 3602 PF_RULES_WUNLOCK(); 3603 PF_CONFIG_UNLOCK(); 3604 error = EINVAL; 3605 break; 3606 } 3607 } 3608 3609 if (pcr->action == 
PF_CHANGE_REMOVE) { 3610 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3611 oldrule); 3612 RB_REMOVE(pf_krule_global, 3613 ruleset->rules[rs_num].active.tree, oldrule); 3614 ruleset->rules[rs_num].active.rcount--; 3615 } else { 3616 pf_hash_rule(newrule); 3617 if (RB_INSERT(pf_krule_global, 3618 ruleset->rules[rs_num].active.tree, newrule) != NULL) { 3619 pf_free_rule(newrule); 3620 PF_RULES_WUNLOCK(); 3621 PF_CONFIG_UNLOCK(); 3622 error = EEXIST; 3623 break; 3624 } 3625 3626 if (oldrule == NULL) 3627 TAILQ_INSERT_TAIL( 3628 ruleset->rules[rs_num].active.ptr, 3629 newrule, entries); 3630 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3631 pcr->action == PF_CHANGE_ADD_BEFORE) 3632 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3633 else 3634 TAILQ_INSERT_AFTER( 3635 ruleset->rules[rs_num].active.ptr, 3636 oldrule, newrule, entries); 3637 ruleset->rules[rs_num].active.rcount++; 3638 } 3639 3640 nr = 0; 3641 TAILQ_FOREACH(oldrule, 3642 ruleset->rules[rs_num].active.ptr, entries) 3643 oldrule->nr = nr++; 3644 3645 ruleset->rules[rs_num].active.ticket++; 3646 3647 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3648 pf_remove_if_empty_kruleset(ruleset); 3649 3650 PF_RULES_WUNLOCK(); 3651 PF_CONFIG_UNLOCK(); 3652 break; 3653 3654 #undef ERROUT 3655 DIOCCHANGERULE_error: 3656 PF_RULES_WUNLOCK(); 3657 PF_CONFIG_UNLOCK(); 3658 pf_krule_free(newrule); 3659 pf_kkif_free(kif); 3660 break; 3661 } 3662 3663 case DIOCCLRSTATES: { 3664 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3665 struct pf_kstate_kill kill; 3666 3667 error = pf_state_kill_to_kstate_kill(psk, &kill); 3668 if (error) 3669 break; 3670 3671 psk->psk_killed = pf_clear_states(&kill); 3672 break; 3673 } 3674 3675 case DIOCCLRSTATESNV: { 3676 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3677 break; 3678 } 3679 3680 case DIOCKILLSTATES: { 3681 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3682 struct pf_kstate_kill kill; 3683 3684 error = pf_state_kill_to_kstate_kill(psk, &kill); 3685 if (error) 3686 break; 3687 3688 psk->psk_killed = 0; 3689 pf_killstates(&kill, &psk->psk_killed); 3690 break; 3691 } 3692 3693 case DIOCKILLSTATESNV: { 3694 error = pf_killstates_nv((struct pfioc_nv *)addr); 3695 break; 3696 } 3697 3698 case DIOCADDSTATE: { 3699 struct pfioc_state *ps = (struct pfioc_state *)addr; 3700 struct pfsync_state *sp = &ps->state; 3701 3702 if (sp->timeout >= PFTM_MAX) { 3703 error = EINVAL; 3704 break; 3705 } 3706 if (V_pfsync_state_import_ptr != NULL) { 3707 PF_RULES_RLOCK(); 3708 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3709 PF_RULES_RUNLOCK(); 3710 } else 3711 error = EOPNOTSUPP; 3712 break; 3713 } 3714 3715 case DIOCGETSTATE: { 3716 struct pfioc_state *ps = (struct pfioc_state *)addr; 3717 struct pf_kstate *s; 3718 3719 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3720 if (s == NULL) { 3721 error = ENOENT; 3722 break; 3723 } 3724 3725 pfsync_state_export(&ps->state, s); 3726 PF_STATE_UNLOCK(s); 3727 break; 3728 } 3729 3730 case DIOCGETSTATENV: { 3731 error = pf_getstate((struct pfioc_nv *)addr); 3732 break; 3733 } 3734 3735 case DIOCGETSTATES: { 3736 struct pfioc_states *ps = (struct pfioc_states *)addr; 3737 struct pf_kstate *s; 3738 struct pfsync_state *pstore, *p; 3739 int i, nr; 3740 size_t slice_count = 16, count; 3741 void *out; 3742 3743 if (ps->ps_len <= 0) { 3744 nr = uma_zone_get_cur(V_pf_state_z); 3745 ps->ps_len = sizeof(struct pfsync_state) * nr; 3746 break; 3747 } 3748 3749 out = ps->ps_states; 3750 pstore = mallocarray(slice_count, 
3751 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3752 nr = 0; 3753 3754 for (i = 0; i <= pf_hashmask; i++) { 3755 struct pf_idhash *ih = &V_pf_idhash[i]; 3756 3757 DIOCGETSTATES_retry: 3758 p = pstore; 3759 3760 if (LIST_EMPTY(&ih->states)) 3761 continue; 3762 3763 PF_HASHROW_LOCK(ih); 3764 count = 0; 3765 LIST_FOREACH(s, &ih->states, entry) { 3766 if (s->timeout == PFTM_UNLINKED) 3767 continue; 3768 count++; 3769 } 3770 3771 if (count > slice_count) { 3772 PF_HASHROW_UNLOCK(ih); 3773 free(pstore, M_TEMP); 3774 slice_count = count * 2; 3775 pstore = mallocarray(slice_count, 3776 sizeof(struct pfsync_state), M_TEMP, 3777 M_WAITOK | M_ZERO); 3778 goto DIOCGETSTATES_retry; 3779 } 3780 3781 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3782 PF_HASHROW_UNLOCK(ih); 3783 goto DIOCGETSTATES_full; 3784 } 3785 3786 LIST_FOREACH(s, &ih->states, entry) { 3787 if (s->timeout == PFTM_UNLINKED) 3788 continue; 3789 3790 pfsync_state_export(p, s); 3791 p++; 3792 nr++; 3793 } 3794 PF_HASHROW_UNLOCK(ih); 3795 error = copyout(pstore, out, 3796 sizeof(struct pfsync_state) * count); 3797 if (error) 3798 break; 3799 out = ps->ps_states + nr; 3800 } 3801 DIOCGETSTATES_full: 3802 ps->ps_len = sizeof(struct pfsync_state) * nr; 3803 free(pstore, M_TEMP); 3804 3805 break; 3806 } 3807 3808 case DIOCGETSTATESV2: { 3809 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3810 struct pf_kstate *s; 3811 struct pf_state_export *pstore, *p; 3812 int i, nr; 3813 size_t slice_count = 16, count; 3814 void *out; 3815 3816 if (ps->ps_req_version > PF_STATE_VERSION) { 3817 error = ENOTSUP; 3818 break; 3819 } 3820 3821 if (ps->ps_len <= 0) { 3822 nr = uma_zone_get_cur(V_pf_state_z); 3823 ps->ps_len = sizeof(struct pf_state_export) * nr; 3824 break; 3825 } 3826 3827 out = ps->ps_states; 3828 pstore = mallocarray(slice_count, 3829 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3830 nr = 0; 3831 3832 for (i = 0; i <= pf_hashmask; i++) { 3833 struct pf_idhash *ih = &V_pf_idhash[i]; 3834 3835 DIOCGETSTATESV2_retry: 3836 p = pstore; 3837 3838 if (LIST_EMPTY(&ih->states)) 3839 continue; 3840 3841 PF_HASHROW_LOCK(ih); 3842 count = 0; 3843 LIST_FOREACH(s, &ih->states, entry) { 3844 if (s->timeout == PFTM_UNLINKED) 3845 continue; 3846 count++; 3847 } 3848 3849 if (count > slice_count) { 3850 PF_HASHROW_UNLOCK(ih); 3851 free(pstore, M_TEMP); 3852 slice_count = count * 2; 3853 pstore = mallocarray(slice_count, 3854 sizeof(struct pf_state_export), M_TEMP, 3855 M_WAITOK | M_ZERO); 3856 goto DIOCGETSTATESV2_retry; 3857 } 3858 3859 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3860 PF_HASHROW_UNLOCK(ih); 3861 goto DIOCGETSTATESV2_full; 3862 } 3863 3864 LIST_FOREACH(s, &ih->states, entry) { 3865 if (s->timeout == PFTM_UNLINKED) 3866 continue; 3867 3868 pf_state_export(p, s); 3869 p++; 3870 nr++; 3871 } 3872 PF_HASHROW_UNLOCK(ih); 3873 error = copyout(pstore, out, 3874 sizeof(struct pf_state_export) * count); 3875 if (error) 3876 break; 3877 out = ps->ps_states + nr; 3878 } 3879 DIOCGETSTATESV2_full: 3880 ps->ps_len = nr * sizeof(struct pf_state_export); 3881 free(pstore, M_TEMP); 3882 3883 break; 3884 } 3885 3886 case DIOCGETSTATUS: { 3887 struct pf_status *s = (struct pf_status *)addr; 3888 3889 PF_RULES_RLOCK(); 3890 s->running = V_pf_status.running; 3891 s->since = V_pf_status.since; 3892 s->debug = V_pf_status.debug; 3893 s->hostid = V_pf_status.hostid; 3894 s->states = V_pf_status.states; 3895 s->src_nodes = V_pf_status.src_nodes; 3896 3897 for (int i = 0; i < PFRES_MAX; i++) 3898 s->counters[i] = 3899 
counter_u64_fetch(V_pf_status.counters[i]); 3900 for (int i = 0; i < LCNT_MAX; i++) 3901 s->lcounters[i] = 3902 counter_u64_fetch(V_pf_status.lcounters[i]); 3903 for (int i = 0; i < FCNT_MAX; i++) 3904 s->fcounters[i] = 3905 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3906 for (int i = 0; i < SCNT_MAX; i++) 3907 s->scounters[i] = 3908 counter_u64_fetch(V_pf_status.scounters[i]); 3909 3910 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3911 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3912 PF_MD5_DIGEST_LENGTH); 3913 3914 pfi_update_status(s->ifname, s); 3915 PF_RULES_RUNLOCK(); 3916 break; 3917 } 3918 3919 case DIOCGETSTATUSNV: { 3920 error = pf_getstatus((struct pfioc_nv *)addr); 3921 break; 3922 } 3923 3924 case DIOCSETSTATUSIF: { 3925 struct pfioc_if *pi = (struct pfioc_if *)addr; 3926 3927 if (pi->ifname[0] == 0) { 3928 bzero(V_pf_status.ifname, IFNAMSIZ); 3929 break; 3930 } 3931 PF_RULES_WLOCK(); 3932 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3933 PF_RULES_WUNLOCK(); 3934 break; 3935 } 3936 3937 case DIOCCLRSTATUS: { 3938 PF_RULES_WLOCK(); 3939 for (int i = 0; i < PFRES_MAX; i++) 3940 counter_u64_zero(V_pf_status.counters[i]); 3941 for (int i = 0; i < FCNT_MAX; i++) 3942 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3943 for (int i = 0; i < SCNT_MAX; i++) 3944 counter_u64_zero(V_pf_status.scounters[i]); 3945 for (int i = 0; i < KLCNT_MAX; i++) 3946 counter_u64_zero(V_pf_status.lcounters[i]); 3947 V_pf_status.since = time_second; 3948 if (*V_pf_status.ifname) 3949 pfi_update_status(V_pf_status.ifname, NULL); 3950 PF_RULES_WUNLOCK(); 3951 break; 3952 } 3953 3954 case DIOCNATLOOK: { 3955 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3956 struct pf_state_key *sk; 3957 struct pf_kstate *state; 3958 struct pf_state_key_cmp key; 3959 int m = 0, direction = pnl->direction; 3960 int sidx, didx; 3961 3962 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3963 sidx = (direction == PF_IN) ? 1 : 0; 3964 didx = (direction == PF_IN) ? 
0 : 1; 3965 3966 if (!pnl->proto || 3967 PF_AZERO(&pnl->saddr, pnl->af) || 3968 PF_AZERO(&pnl->daddr, pnl->af) || 3969 ((pnl->proto == IPPROTO_TCP || 3970 pnl->proto == IPPROTO_UDP) && 3971 (!pnl->dport || !pnl->sport))) 3972 error = EINVAL; 3973 else { 3974 bzero(&key, sizeof(key)); 3975 key.af = pnl->af; 3976 key.proto = pnl->proto; 3977 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3978 key.port[sidx] = pnl->sport; 3979 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3980 key.port[didx] = pnl->dport; 3981 3982 state = pf_find_state_all(&key, direction, &m); 3983 if (state == NULL) { 3984 error = ENOENT; 3985 } else { 3986 if (m > 1) { 3987 PF_STATE_UNLOCK(state); 3988 error = E2BIG; /* more than one state */ 3989 } else { 3990 sk = state->key[sidx]; 3991 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3992 pnl->rsport = sk->port[sidx]; 3993 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3994 pnl->rdport = sk->port[didx]; 3995 PF_STATE_UNLOCK(state); 3996 } 3997 } 3998 } 3999 break; 4000 } 4001 4002 case DIOCSETTIMEOUT: { 4003 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4004 int old; 4005 4006 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 4007 pt->seconds < 0) { 4008 error = EINVAL; 4009 break; 4010 } 4011 PF_RULES_WLOCK(); 4012 old = V_pf_default_rule.timeout[pt->timeout]; 4013 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 4014 pt->seconds = 1; 4015 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 4016 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 4017 wakeup(pf_purge_thread); 4018 pt->seconds = old; 4019 PF_RULES_WUNLOCK(); 4020 break; 4021 } 4022 4023 case DIOCGETTIMEOUT: { 4024 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 4025 4026 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 4027 error = EINVAL; 4028 break; 4029 } 4030 PF_RULES_RLOCK(); 4031 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 4032 PF_RULES_RUNLOCK(); 4033 break; 4034 } 4035 4036 case DIOCGETLIMIT: { 4037 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4038 4039 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 4040 error = EINVAL; 4041 break; 4042 } 4043 PF_RULES_RLOCK(); 4044 pl->limit = V_pf_limits[pl->index].limit; 4045 PF_RULES_RUNLOCK(); 4046 break; 4047 } 4048 4049 case DIOCSETLIMIT: { 4050 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 4051 int old_limit; 4052 4053 PF_RULES_WLOCK(); 4054 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 4055 V_pf_limits[pl->index].zone == NULL) { 4056 PF_RULES_WUNLOCK(); 4057 error = EINVAL; 4058 break; 4059 } 4060 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 4061 old_limit = V_pf_limits[pl->index].limit; 4062 V_pf_limits[pl->index].limit = pl->limit; 4063 pl->limit = old_limit; 4064 PF_RULES_WUNLOCK(); 4065 break; 4066 } 4067 4068 case DIOCSETDEBUG: { 4069 u_int32_t *level = (u_int32_t *)addr; 4070 4071 PF_RULES_WLOCK(); 4072 V_pf_status.debug = *level; 4073 PF_RULES_WUNLOCK(); 4074 break; 4075 } 4076 4077 case DIOCCLRRULECTRS: { 4078 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 4079 struct pf_kruleset *ruleset = &pf_main_ruleset; 4080 struct pf_krule *rule; 4081 4082 PF_RULES_WLOCK(); 4083 TAILQ_FOREACH(rule, 4084 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 4085 pf_counter_u64_zero(&rule->evaluations); 4086 for (int i = 0; i < 2; i++) { 4087 pf_counter_u64_zero(&rule->packets[i]); 4088 pf_counter_u64_zero(&rule->bytes[i]); 4089 } 4090 } 4091 PF_RULES_WUNLOCK(); 4092 break; 4093 } 4094 4095 case DIOCGIFSPEEDV0: 4096 case DIOCGIFSPEEDV1: { 4097 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 4098 struct pf_ifspeed_v1 ps; 4099 struct ifnet *ifp; 4100 4101 if (psp->ifname[0] == '\0') { 4102 error = EINVAL; 4103 break; 4104 } 4105 4106 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 4107 if (error != 0) 4108 break; 4109 ifp = ifunit(ps.ifname); 4110 if (ifp != NULL) { 4111 psp->baudrate32 = 4112 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 4113 if (cmd == DIOCGIFSPEEDV1) 4114 psp->baudrate = ifp->if_baudrate; 4115 } else { 4116 error = EINVAL; 4117 } 4118 break; 4119 } 4120 4121 #ifdef ALTQ 4122 case DIOCSTARTALTQ: { 4123 struct pf_altq *altq; 4124 4125 PF_RULES_WLOCK(); 4126 /* enable all altq interfaces on active list */ 4127 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4128 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4129 error = pf_enable_altq(altq); 4130 if (error != 0) 4131 break; 4132 } 4133 } 4134 if (error == 0) 4135 V_pf_altq_running = 1; 4136 PF_RULES_WUNLOCK(); 4137 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 4138 break; 4139 } 4140 4141 case DIOCSTOPALTQ: { 4142 struct pf_altq *altq; 4143 4144 PF_RULES_WLOCK(); 4145 /* disable all altq interfaces on active list */ 4146 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 4147 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 4148 error = pf_disable_altq(altq); 4149 if (error != 0) 4150 break; 4151 } 4152 } 4153 if (error == 0) 4154 V_pf_altq_running = 0; 4155 PF_RULES_WUNLOCK(); 4156 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 4157 break; 4158 } 4159 4160 case DIOCADDALTQV0: 4161 case DIOCADDALTQV1: { 4162 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4163 struct pf_altq *altq, *a; 4164 struct ifnet *ifp; 4165 4166 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 4167 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 4168 if (error) 4169 break; 4170 altq->local_flags = 0; 4171 4172 PF_RULES_WLOCK(); 4173 if (pa->ticket != V_ticket_altqs_inactive) { 4174 PF_RULES_WUNLOCK(); 4175 free(altq, M_PFALTQ); 4176 error = EBUSY; 4177 break; 4178 } 4179 4180 /* 4181 * if this is for a queue, find the discipline and 4182 * copy the necessary fields 4183 */ 4184 if (altq->qname[0] != 0) { 4185 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 4186 PF_RULES_WUNLOCK(); 4187 error = EBUSY; 4188 free(altq, M_PFALTQ); 4189 break; 4190 } 4191 altq->altq_disc = NULL; 4192 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 4193 if (strncmp(a->ifname, altq->ifname, 4194 IFNAMSIZ) == 0) { 4195 altq->altq_disc = a->altq_disc; 4196 break; 4197 } 4198 } 4199 } 4200 4201 if ((ifp = ifunit(altq->ifname)) == NULL) 4202 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 4203 else 4204 error = altq_add(ifp, altq); 4205 4206 if (error) { 4207 PF_RULES_WUNLOCK(); 4208 free(altq, M_PFALTQ); 4209 break; 4210 } 4211 4212 if (altq->qname[0] != 0) 4213 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 4214 else 4215 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 4216 /* version error check done on import above */ 4217 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4218 PF_RULES_WUNLOCK(); 4219 break; 4220 } 4221 4222 case DIOCGETALTQSV0: 4223 case DIOCGETALTQSV1: { 4224 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4225 struct pf_altq *altq; 4226 4227 PF_RULES_RLOCK(); 4228 pa->nr = 0; 4229 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 4230 pa->nr++; 4231 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 4232 pa->nr++; 4233 pa->ticket = V_ticket_altqs_active; 4234 PF_RULES_RUNLOCK(); 4235 break; 4236 } 4237 4238 case DIOCGETALTQV0: 4239 
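/* V0 and V1 share this handler; pf_export_kaltq() derives the
 * struct layout to copy out from IOCPARM_LEN(cmd). */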
case DIOCGETALTQV1: { 4240 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 4241 struct pf_altq *altq; 4242 4243 PF_RULES_RLOCK(); 4244 if (pa->ticket != V_ticket_altqs_active) { 4245 PF_RULES_RUNLOCK(); 4246 error = EBUSY; 4247 break; 4248 } 4249 altq = pf_altq_get_nth_active(pa->nr); 4250 if (altq == NULL) { 4251 PF_RULES_RUNLOCK(); 4252 error = EBUSY; 4253 break; 4254 } 4255 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 4256 PF_RULES_RUNLOCK(); 4257 break; 4258 } 4259 4260 case DIOCCHANGEALTQV0: 4261 case DIOCCHANGEALTQV1: 4262 /* CHANGEALTQ not supported yet! */ 4263 error = ENODEV; 4264 break; 4265 4266 case DIOCGETQSTATSV0: 4267 case DIOCGETQSTATSV1: { 4268 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 4269 struct pf_altq *altq; 4270 int nbytes; 4271 u_int32_t version; 4272 4273 PF_RULES_RLOCK(); 4274 if (pq->ticket != V_ticket_altqs_active) { 4275 PF_RULES_RUNLOCK(); 4276 error = EBUSY; 4277 break; 4278 } 4279 nbytes = pq->nbytes; 4280 altq = pf_altq_get_nth_active(pq->nr); 4281 if (altq == NULL) { 4282 PF_RULES_RUNLOCK(); 4283 error = EBUSY; 4284 break; 4285 } 4286 4287 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 4288 PF_RULES_RUNLOCK(); 4289 error = ENXIO; 4290 break; 4291 } 4292 PF_RULES_RUNLOCK(); 4293 if (cmd == DIOCGETQSTATSV0) 4294 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 4295 else 4296 version = pq->version; 4297 error = altq_getqstats(altq, pq->buf, &nbytes, version); 4298 if (error == 0) { 4299 pq->scheduler = altq->scheduler; 4300 pq->nbytes = nbytes; 4301 } 4302 break; 4303 } 4304 #endif /* ALTQ */ 4305 4306 case DIOCBEGINADDRS: { 4307 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4308 4309 PF_RULES_WLOCK(); 4310 pf_empty_kpool(&V_pf_pabuf); 4311 pp->ticket = ++V_ticket_pabuf; 4312 PF_RULES_WUNLOCK(); 4313 break; 4314 } 4315 4316 case DIOCADDADDR: { 4317 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4318 struct pf_kpooladdr *pa; 4319 struct pfi_kkif *kif = NULL; 4320 4321 #ifndef INET 4322 if (pp->af == AF_INET) { 4323 error = EAFNOSUPPORT; 4324 break; 4325 } 4326 #endif /* INET */ 4327 #ifndef INET6 4328 if (pp->af == AF_INET6) { 4329 error = EAFNOSUPPORT; 4330 break; 4331 } 4332 #endif /* INET6 */ 4333 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4334 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4335 pp->addr.addr.type != PF_ADDR_TABLE) { 4336 error = EINVAL; 4337 break; 4338 } 4339 if (pp->addr.addr.p.dyn != NULL) { 4340 error = EINVAL; 4341 break; 4342 } 4343 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4344 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4345 if (error != 0) 4346 break; 4347 if (pa->ifname[0]) 4348 kif = pf_kkif_create(M_WAITOK); 4349 PF_RULES_WLOCK(); 4350 if (pp->ticket != V_ticket_pabuf) { 4351 PF_RULES_WUNLOCK(); 4352 if (pa->ifname[0]) 4353 pf_kkif_free(kif); 4354 free(pa, M_PFRULE); 4355 error = EBUSY; 4356 break; 4357 } 4358 if (pa->ifname[0]) { 4359 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4360 kif = NULL; 4361 pfi_kkif_ref(pa->kif); 4362 } else 4363 pa->kif = NULL; 4364 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4365 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4366 if (pa->ifname[0]) 4367 pfi_kkif_unref(pa->kif); 4368 PF_RULES_WUNLOCK(); 4369 free(pa, M_PFRULE); 4370 break; 4371 } 4372 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4373 PF_RULES_WUNLOCK(); 4374 break; 4375 } 4376 4377 case DIOCGETADDRS: { 4378 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4379 struct pf_kpool *pool; 4380 struct pf_kpooladdr *pa; 4381 4382 
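/* NUL-terminate the user-supplied anchor path; the pool lookup below relies on a proper C string. */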
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4383 pp->nr = 0; 4384 4385 PF_RULES_RLOCK(); 4386 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4387 pp->r_num, 0, 1, 0); 4388 if (pool == NULL) { 4389 PF_RULES_RUNLOCK(); 4390 error = EBUSY; 4391 break; 4392 } 4393 TAILQ_FOREACH(pa, &pool->list, entries) 4394 pp->nr++; 4395 PF_RULES_RUNLOCK(); 4396 break; 4397 } 4398 4399 case DIOCGETADDR: { 4400 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4401 struct pf_kpool *pool; 4402 struct pf_kpooladdr *pa; 4403 u_int32_t nr = 0; 4404 4405 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4406 4407 PF_RULES_RLOCK(); 4408 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4409 pp->r_num, 0, 1, 1); 4410 if (pool == NULL) { 4411 PF_RULES_RUNLOCK(); 4412 error = EBUSY; 4413 break; 4414 } 4415 pa = TAILQ_FIRST(&pool->list); 4416 while ((pa != NULL) && (nr < pp->nr)) { 4417 pa = TAILQ_NEXT(pa, entries); 4418 nr++; 4419 } 4420 if (pa == NULL) { 4421 PF_RULES_RUNLOCK(); 4422 error = EBUSY; 4423 break; 4424 } 4425 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4426 pf_addr_copyout(&pp->addr.addr); 4427 PF_RULES_RUNLOCK(); 4428 break; 4429 } 4430 4431 case DIOCCHANGEADDR: { 4432 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4433 struct pf_kpool *pool; 4434 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4435 struct pf_kruleset *ruleset; 4436 struct pfi_kkif *kif = NULL; 4437 4438 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4439 4440 if (pca->action < PF_CHANGE_ADD_HEAD || 4441 pca->action > PF_CHANGE_REMOVE) { 4442 error = EINVAL; 4443 break; 4444 } 4445 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4446 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4447 pca->addr.addr.type != PF_ADDR_TABLE) { 4448 error = EINVAL; 4449 break; 4450 } 4451 if (pca->addr.addr.p.dyn != NULL) { 4452 error = EINVAL; 4453 break; 4454 } 4455 4456 if (pca->action != PF_CHANGE_REMOVE) { 4457 #ifndef INET 4458 if (pca->af == AF_INET) { 4459 error = EAFNOSUPPORT; 4460 break; 4461 } 4462 #endif /* INET */ 4463 #ifndef INET6 4464 if (pca->af == AF_INET6) { 4465 error = EAFNOSUPPORT; 4466 break; 4467 } 4468 #endif /* INET6 */ 4469 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4470 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4471 if (newpa->ifname[0]) 4472 kif = pf_kkif_create(M_WAITOK); 4473 newpa->kif = NULL; 4474 } 4475 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4476 PF_RULES_WLOCK(); 4477 ruleset = pf_find_kruleset(pca->anchor); 4478 if (ruleset == NULL) 4479 ERROUT(EBUSY); 4480 4481 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4482 pca->r_num, pca->r_last, 1, 1); 4483 if (pool == NULL) 4484 ERROUT(EBUSY); 4485 4486 if (pca->action != PF_CHANGE_REMOVE) { 4487 if (newpa->ifname[0]) { 4488 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4489 pfi_kkif_ref(newpa->kif); 4490 kif = NULL; 4491 } 4492 4493 switch (newpa->addr.type) { 4494 case PF_ADDR_DYNIFTL: 4495 error = pfi_dynaddr_setup(&newpa->addr, 4496 pca->af); 4497 break; 4498 case PF_ADDR_TABLE: 4499 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4500 newpa->addr.v.tblname); 4501 if (newpa->addr.p.tbl == NULL) 4502 error = ENOMEM; 4503 break; 4504 } 4505 if (error) 4506 goto DIOCCHANGEADDR_error; 4507 } 4508 4509 switch (pca->action) { 4510 case PF_CHANGE_ADD_HEAD: 4511 oldpa = TAILQ_FIRST(&pool->list); 4512 break; 4513 case PF_CHANGE_ADD_TAIL: 4514 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4515 break; 4516 default: 4517 oldpa = TAILQ_FIRST(&pool->list); 4518 for (int i = 0; oldpa && i < pca->nr; i++) 4519 oldpa = 
TAILQ_NEXT(oldpa, entries); 4520 4521 if (oldpa == NULL) 4522 ERROUT(EINVAL); 4523 } 4524 4525 if (pca->action == PF_CHANGE_REMOVE) { 4526 TAILQ_REMOVE(&pool->list, oldpa, entries); 4527 switch (oldpa->addr.type) { 4528 case PF_ADDR_DYNIFTL: 4529 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4530 break; 4531 case PF_ADDR_TABLE: 4532 pfr_detach_table(oldpa->addr.p.tbl); 4533 break; 4534 } 4535 if (oldpa->kif) 4536 pfi_kkif_unref(oldpa->kif); 4537 free(oldpa, M_PFRULE); 4538 } else { 4539 if (oldpa == NULL) 4540 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4541 else if (pca->action == PF_CHANGE_ADD_HEAD || 4542 pca->action == PF_CHANGE_ADD_BEFORE) 4543 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4544 else 4545 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4546 newpa, entries); 4547 } 4548 4549 pool->cur = TAILQ_FIRST(&pool->list); 4550 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4551 PF_RULES_WUNLOCK(); 4552 break; 4553 4554 #undef ERROUT 4555 DIOCCHANGEADDR_error: 4556 if (newpa != NULL) { 4557 if (newpa->kif) 4558 pfi_kkif_unref(newpa->kif); 4559 free(newpa, M_PFRULE); 4560 } 4561 PF_RULES_WUNLOCK(); 4562 pf_kkif_free(kif); 4563 break; 4564 } 4565 4566 case DIOCGETRULESETS: { 4567 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4568 struct pf_kruleset *ruleset; 4569 struct pf_kanchor *anchor; 4570 4571 pr->path[sizeof(pr->path) - 1] = 0; 4572 4573 PF_RULES_RLOCK(); 4574 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4575 PF_RULES_RUNLOCK(); 4576 error = ENOENT; 4577 break; 4578 } 4579 pr->nr = 0; 4580 if (ruleset->anchor == NULL) { 4581 /* XXX kludge for pf_main_ruleset */ 4582 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4583 if (anchor->parent == NULL) 4584 pr->nr++; 4585 } else { 4586 RB_FOREACH(anchor, pf_kanchor_node, 4587 &ruleset->anchor->children) 4588 pr->nr++; 4589 } 4590 PF_RULES_RUNLOCK(); 4591 break; 4592 } 4593 4594 case DIOCGETRULESET: { 4595 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4596 struct pf_kruleset *ruleset; 4597 struct pf_kanchor *anchor; 4598 u_int32_t nr = 0; 4599 4600 pr->path[sizeof(pr->path) - 1] = 0; 4601 4602 PF_RULES_RLOCK(); 4603 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4604 PF_RULES_RUNLOCK(); 4605 error = ENOENT; 4606 break; 4607 } 4608 pr->name[0] = 0; 4609 if (ruleset->anchor == NULL) { 4610 /* XXX kludge for pf_main_ruleset */ 4611 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4612 if (anchor->parent == NULL && nr++ == pr->nr) { 4613 strlcpy(pr->name, anchor->name, 4614 sizeof(pr->name)); 4615 break; 4616 } 4617 } else { 4618 RB_FOREACH(anchor, pf_kanchor_node, 4619 &ruleset->anchor->children) 4620 if (nr++ == pr->nr) { 4621 strlcpy(pr->name, anchor->name, 4622 sizeof(pr->name)); 4623 break; 4624 } 4625 } 4626 if (!pr->name[0]) 4627 error = EBUSY; 4628 PF_RULES_RUNLOCK(); 4629 break; 4630 } 4631 4632 case DIOCRCLRTABLES: { 4633 struct pfioc_table *io = (struct pfioc_table *)addr; 4634 4635 if (io->pfrio_esize != 0) { 4636 error = ENODEV; 4637 break; 4638 } 4639 PF_RULES_WLOCK(); 4640 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4641 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4642 PF_RULES_WUNLOCK(); 4643 break; 4644 } 4645 4646 case DIOCRADDTABLES: { 4647 struct pfioc_table *io = (struct pfioc_table *)addr; 4648 struct pfr_table *pfrts; 4649 size_t totlen; 4650 4651 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4652 error = ENODEV; 4653 break; 4654 } 4655 4656 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4657 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4658 error = ENOMEM; 4659 break; 4660 } 4661 4662 totlen = io->pfrio_size * sizeof(struct pfr_table); 4663 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4664 M_TEMP, M_WAITOK); 4665 error = copyin(io->pfrio_buffer, pfrts, totlen); 4666 if (error) { 4667 free(pfrts, M_TEMP); 4668 break; 4669 } 4670 PF_RULES_WLOCK(); 4671 error = pfr_add_tables(pfrts, io->pfrio_size, 4672 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4673 PF_RULES_WUNLOCK(); 4674 free(pfrts, M_TEMP); 4675 break; 4676 } 4677 4678 case DIOCRDELTABLES: { 4679 struct pfioc_table *io = (struct pfioc_table *)addr; 4680 struct pfr_table *pfrts; 4681 size_t totlen; 4682 4683 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4684 error = ENODEV; 4685 break; 4686 } 4687 4688 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4689 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4690 error = ENOMEM; 4691 break; 4692 } 4693 4694 totlen = io->pfrio_size * sizeof(struct pfr_table); 4695 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4696 M_TEMP, M_WAITOK); 4697 error = copyin(io->pfrio_buffer, pfrts, totlen); 4698 if (error) { 4699 free(pfrts, M_TEMP); 4700 break; 4701 } 4702 PF_RULES_WLOCK(); 4703 error = pfr_del_tables(pfrts, io->pfrio_size, 4704 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4705 PF_RULES_WUNLOCK(); 4706 free(pfrts, M_TEMP); 4707 break; 4708 } 4709 4710 case DIOCRGETTABLES: { 4711 struct pfioc_table *io = (struct pfioc_table *)addr; 4712 struct pfr_table *pfrts; 4713 size_t totlen; 4714 int n; 4715 4716 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4717 error = ENODEV; 4718 break; 4719 } 4720 PF_RULES_RLOCK(); 4721 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4722 if (n < 0) { 4723 PF_RULES_RUNLOCK(); 4724 error = EINVAL; 4725 break; 4726 } 4727 io->pfrio_size = min(io->pfrio_size, n); 4728 4729 totlen = io->pfrio_size * sizeof(struct pfr_table); 4730 4731 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4732 M_TEMP, M_NOWAIT | M_ZERO); 4733 if (pfrts == NULL) { 4734 error = ENOMEM; 4735 PF_RULES_RUNLOCK(); 4736 break; 4737 } 4738 error = pfr_get_tables(&io->pfrio_table, pfrts, 4739 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4740 PF_RULES_RUNLOCK(); 4741 if (error == 0) 4742 error = copyout(pfrts, io->pfrio_buffer, totlen); 4743 free(pfrts, M_TEMP); 4744 break; 4745 } 4746 4747 case DIOCRGETTSTATS: { 4748 struct pfioc_table *io = (struct pfioc_table *)addr; 4749 struct pfr_tstats *pfrtstats; 4750 size_t totlen; 4751 int n; 4752 4753 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4754 error = ENODEV; 4755 break; 4756 } 4757 PF_TABLE_STATS_LOCK(); 4758 PF_RULES_RLOCK(); 4759 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4760 if (n < 0) { 4761 PF_RULES_RUNLOCK(); 4762 PF_TABLE_STATS_UNLOCK(); 4763 error = EINVAL; 4764 break; 4765 } 4766 io->pfrio_size = min(io->pfrio_size, n); 4767 4768 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4769 pfrtstats = mallocarray(io->pfrio_size, 4770 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4771 if (pfrtstats == NULL) { 4772 error = ENOMEM; 4773 PF_RULES_RUNLOCK(); 4774 PF_TABLE_STATS_UNLOCK(); 4775 break; 4776 } 4777 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4778 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4779 PF_RULES_RUNLOCK(); 4780 PF_TABLE_STATS_UNLOCK(); 4781 if (error == 0) 4782 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4783 free(pfrtstats, M_TEMP); 4784 break; 4785 } 4786 4787 case 
DIOCRCLRTSTATS: { 4788 struct pfioc_table *io = (struct pfioc_table *)addr; 4789 struct pfr_table *pfrts; 4790 size_t totlen; 4791 4792 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4793 error = ENODEV; 4794 break; 4795 } 4796 4797 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4798 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4799 /* We used to count tables and use the minimum required 4800 * size, so we didn't fail on overly large requests. 4801 * Keep doing so. */ 4802 io->pfrio_size = pf_ioctl_maxcount; 4803 break; 4804 } 4805 4806 totlen = io->pfrio_size * sizeof(struct pfr_table); 4807 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4808 M_TEMP, M_WAITOK); 4809 error = copyin(io->pfrio_buffer, pfrts, totlen); 4810 if (error) { 4811 free(pfrts, M_TEMP); 4812 break; 4813 } 4814 4815 PF_TABLE_STATS_LOCK(); 4816 PF_RULES_RLOCK(); 4817 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4818 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4819 PF_RULES_RUNLOCK(); 4820 PF_TABLE_STATS_UNLOCK(); 4821 free(pfrts, M_TEMP); 4822 break; 4823 } 4824 4825 case DIOCRSETTFLAGS: { 4826 struct pfioc_table *io = (struct pfioc_table *)addr; 4827 struct pfr_table *pfrts; 4828 size_t totlen; 4829 int n; 4830 4831 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4832 error = ENODEV; 4833 break; 4834 } 4835 4836 PF_RULES_RLOCK(); 4837 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4838 if (n < 0) { 4839 PF_RULES_RUNLOCK(); 4840 error = EINVAL; 4841 break; 4842 } 4843 4844 io->pfrio_size = min(io->pfrio_size, n); 4845 PF_RULES_RUNLOCK(); 4846 4847 totlen = io->pfrio_size * sizeof(struct pfr_table); 4848 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4849 M_TEMP, M_WAITOK); 4850 error = copyin(io->pfrio_buffer, pfrts, totlen); 4851 if (error) { 4852 free(pfrts, M_TEMP); 4853 break; 4854 } 4855 PF_RULES_WLOCK(); 4856 error = pfr_set_tflags(pfrts, io->pfrio_size, 4857 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4858 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4859 PF_RULES_WUNLOCK(); 4860 free(pfrts, M_TEMP); 4861 break; 4862 } 4863 4864 case DIOCRCLRADDRS: { 4865 struct pfioc_table *io = (struct pfioc_table *)addr; 4866 4867 if (io->pfrio_esize != 0) { 4868 error = ENODEV; 4869 break; 4870 } 4871 PF_RULES_WLOCK(); 4872 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4873 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4874 PF_RULES_WUNLOCK(); 4875 break; 4876 } 4877 4878 case DIOCRADDADDRS: { 4879 struct pfioc_table *io = (struct pfioc_table *)addr; 4880 struct pfr_addr *pfras; 4881 size_t totlen; 4882 4883 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4884 error = ENODEV; 4885 break; 4886 } 4887 if (io->pfrio_size < 0 || 4888 io->pfrio_size > pf_ioctl_maxcount || 4889 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4890 error = EINVAL; 4891 break; 4892 } 4893 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4894 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4895 M_TEMP, M_WAITOK); 4896 error = copyin(io->pfrio_buffer, pfras, totlen); 4897 if (error) { 4898 free(pfras, M_TEMP); 4899 break; 4900 } 4901 PF_RULES_WLOCK(); 4902 error = pfr_add_addrs(&io->pfrio_table, pfras, 4903 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4904 PFR_FLAG_USERIOCTL); 4905 PF_RULES_WUNLOCK(); 4906 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4907 error = copyout(pfras, io->pfrio_buffer, totlen); 4908 free(pfras, M_TEMP); 4909 break; 4910 } 4911 4912 case DIOCRDELADDRS: { 4913 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4914 struct pfr_addr *pfras; 4915 size_t totlen; 4916 4917 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4918 error = ENODEV; 4919 break; 4920 } 4921 if (io->pfrio_size < 0 || 4922 io->pfrio_size > pf_ioctl_maxcount || 4923 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4924 error = EINVAL; 4925 break; 4926 } 4927 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4928 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4929 M_TEMP, M_WAITOK); 4930 error = copyin(io->pfrio_buffer, pfras, totlen); 4931 if (error) { 4932 free(pfras, M_TEMP); 4933 break; 4934 } 4935 PF_RULES_WLOCK(); 4936 error = pfr_del_addrs(&io->pfrio_table, pfras, 4937 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4938 PFR_FLAG_USERIOCTL); 4939 PF_RULES_WUNLOCK(); 4940 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4941 error = copyout(pfras, io->pfrio_buffer, totlen); 4942 free(pfras, M_TEMP); 4943 break; 4944 } 4945 4946 case DIOCRSETADDRS: { 4947 struct pfioc_table *io = (struct pfioc_table *)addr; 4948 struct pfr_addr *pfras; 4949 size_t totlen, count; 4950 4951 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4952 error = ENODEV; 4953 break; 4954 } 4955 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4956 error = EINVAL; 4957 break; 4958 } 4959 count = max(io->pfrio_size, io->pfrio_size2); 4960 if (count > pf_ioctl_maxcount || 4961 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4962 error = EINVAL; 4963 break; 4964 } 4965 totlen = count * sizeof(struct pfr_addr); 4966 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4967 M_WAITOK); 4968 error = copyin(io->pfrio_buffer, pfras, totlen); 4969 if (error) { 4970 free(pfras, M_TEMP); 4971 break; 4972 } 4973 PF_RULES_WLOCK(); 4974 error = pfr_set_addrs(&io->pfrio_table, pfras, 4975 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4976 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4977 PFR_FLAG_USERIOCTL, 0); 4978 PF_RULES_WUNLOCK(); 4979 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4980 error = copyout(pfras, io->pfrio_buffer, totlen); 4981 free(pfras, M_TEMP); 4982 break; 4983 } 4984 4985 case DIOCRGETADDRS: { 4986 struct pfioc_table *io = (struct pfioc_table *)addr; 4987 struct pfr_addr *pfras; 4988 size_t totlen; 4989 4990 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4991 error = ENODEV; 4992 break; 4993 } 4994 if (io->pfrio_size < 0 || 4995 io->pfrio_size > pf_ioctl_maxcount || 4996 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4997 error = EINVAL; 4998 break; 4999 } 5000 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5001 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5002 M_TEMP, M_WAITOK | M_ZERO); 5003 PF_RULES_RLOCK(); 5004 error = pfr_get_addrs(&io->pfrio_table, pfras, 5005 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5006 PF_RULES_RUNLOCK(); 5007 if (error == 0) 5008 error = copyout(pfras, io->pfrio_buffer, totlen); 5009 free(pfras, M_TEMP); 5010 break; 5011 } 5012 5013 case DIOCRGETASTATS: { 5014 struct pfioc_table *io = (struct pfioc_table *)addr; 5015 struct pfr_astats *pfrastats; 5016 size_t totlen; 5017 5018 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 5019 error = ENODEV; 5020 break; 5021 } 5022 if (io->pfrio_size < 0 || 5023 io->pfrio_size > pf_ioctl_maxcount || 5024 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 5025 error = EINVAL; 5026 break; 5027 } 5028 totlen = io->pfrio_size * sizeof(struct pfr_astats); 5029 pfrastats = mallocarray(io->pfrio_size, 5030 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 5031 PF_RULES_RLOCK(); 5032 error = pfr_get_astats(&io->pfrio_table, pfrastats, 5033 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5034 PF_RULES_RUNLOCK(); 5035 if (error == 0) 5036 error = copyout(pfrastats, io->pfrio_buffer, totlen); 5037 free(pfrastats, M_TEMP); 5038 break; 5039 } 5040 5041 case DIOCRCLRASTATS: { 5042 struct pfioc_table *io = (struct pfioc_table *)addr; 5043 struct pfr_addr *pfras; 5044 size_t totlen; 5045 5046 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5047 error = ENODEV; 5048 break; 5049 } 5050 if (io->pfrio_size < 0 || 5051 io->pfrio_size > pf_ioctl_maxcount || 5052 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5053 error = EINVAL; 5054 break; 5055 } 5056 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5057 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5058 M_TEMP, M_WAITOK); 5059 error = copyin(io->pfrio_buffer, pfras, totlen); 5060 if (error) { 5061 free(pfras, M_TEMP); 5062 break; 5063 } 5064 PF_RULES_WLOCK(); 5065 error = pfr_clr_astats(&io->pfrio_table, pfras, 5066 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 5067 PFR_FLAG_USERIOCTL); 5068 PF_RULES_WUNLOCK(); 5069 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 5070 error = copyout(pfras, io->pfrio_buffer, totlen); 5071 free(pfras, M_TEMP); 5072 break; 5073 } 5074 5075 case DIOCRTSTADDRS: { 5076 struct pfioc_table *io = (struct pfioc_table *)addr; 5077 struct pfr_addr *pfras; 5078 size_t totlen; 5079 5080 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5081 error = ENODEV; 5082 break; 5083 } 5084 if (io->pfrio_size < 0 || 5085 io->pfrio_size > pf_ioctl_maxcount || 5086 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5087 error = EINVAL; 5088 break; 5089 } 5090 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5091 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5092 M_TEMP, M_WAITOK); 5093 error = copyin(io->pfrio_buffer, pfras, totlen); 5094 if (error) { 5095 free(pfras, M_TEMP); 5096 break; 5097 } 5098 PF_RULES_RLOCK(); 5099 error = pfr_tst_addrs(&io->pfrio_table, pfras, 5100 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 5101 PFR_FLAG_USERIOCTL); 5102 PF_RULES_RUNLOCK(); 5103 if (error == 0) 5104 error = copyout(pfras, io->pfrio_buffer, totlen); 5105 free(pfras, M_TEMP); 5106 break; 5107 } 5108 5109 case DIOCRINADEFINE: { 5110 struct pfioc_table *io = (struct pfioc_table *)addr; 5111 struct pfr_addr *pfras; 5112 size_t totlen; 5113 5114 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 5115 error = ENODEV; 5116 break; 5117 } 5118 if (io->pfrio_size < 0 || 5119 io->pfrio_size > pf_ioctl_maxcount || 5120 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 5121 error = EINVAL; 5122 break; 5123 } 5124 totlen = io->pfrio_size * sizeof(struct pfr_addr); 5125 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 5126 M_TEMP, M_WAITOK); 5127 error = copyin(io->pfrio_buffer, pfras, totlen); 5128 if (error) { 5129 free(pfras, M_TEMP); 5130 break; 5131 } 5132 PF_RULES_WLOCK(); 5133 error = pfr_ina_define(&io->pfrio_table, pfras, 5134 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 5135 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 5136 PF_RULES_WUNLOCK(); 5137 free(pfras, M_TEMP); 5138 break; 5139 } 5140 5141 case DIOCOSFPADD: { 5142 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 5143 PF_RULES_WLOCK(); 5144 error = pf_osfp_add(io); 5145 PF_RULES_WUNLOCK(); 5146 break; 5147 } 5148 5149 case DIOCOSFPGET: { 5150 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr; 5151 PF_RULES_RLOCK(); 5152 error = pf_osfp_get(io); 5153 PF_RULES_RUNLOCK(); 5154 break; 5155 } 5156 5157 case DIOCXBEGIN: { 5158 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5159 struct pfioc_trans_e *ioes, *ioe; 5160 size_t totlen; 5161 int i; 5162 5163 if (io->esize != sizeof(*ioe)) { 5164 error = ENODEV; 5165 break; 5166 } 5167 if (io->size < 0 || 5168 io->size > pf_ioctl_maxcount || 5169 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5170 error = EINVAL; 5171 break; 5172 } 5173 totlen = sizeof(struct pfioc_trans_e) * io->size; 5174 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5175 M_TEMP, M_WAITOK); 5176 error = copyin(io->array, ioes, totlen); 5177 if (error) { 5178 free(ioes, M_TEMP); 5179 break; 5180 } 5181 /* Ensure there are no more Ethernet rules to clean up. */ 5182 NET_EPOCH_DRAIN_CALLBACKS(); 5183 PF_RULES_WLOCK(); 5184 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5185 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5186 switch (ioe->rs_num) { 5187 case PF_RULESET_ETH: 5188 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 5189 PF_RULES_WUNLOCK(); 5190 free(ioes, M_TEMP); 5191 goto fail; 5192 } 5193 break; 5194 #ifdef ALTQ 5195 case PF_RULESET_ALTQ: 5196 if (ioe->anchor[0]) { 5197 PF_RULES_WUNLOCK(); 5198 free(ioes, M_TEMP); 5199 error = EINVAL; 5200 goto fail; 5201 } 5202 if ((error = pf_begin_altq(&ioe->ticket))) { 5203 PF_RULES_WUNLOCK(); 5204 free(ioes, M_TEMP); 5205 goto fail; 5206 } 5207 break; 5208 #endif /* ALTQ */ 5209 case PF_RULESET_TABLE: 5210 { 5211 struct pfr_table table; 5212 5213 bzero(&table, sizeof(table)); 5214 strlcpy(table.pfrt_anchor, ioe->anchor, 5215 sizeof(table.pfrt_anchor)); 5216 if ((error = pfr_ina_begin(&table, 5217 &ioe->ticket, NULL, 0))) { 5218 PF_RULES_WUNLOCK(); 5219 free(ioes, M_TEMP); 5220 goto fail; 5221 } 5222 break; 5223 } 5224 default: 5225 if ((error = pf_begin_rules(&ioe->ticket, 5226 ioe->rs_num, ioe->anchor))) { 5227 PF_RULES_WUNLOCK(); 5228 free(ioes, M_TEMP); 5229 goto fail; 5230 } 5231 break; 5232 } 5233 } 5234 PF_RULES_WUNLOCK(); 5235 error = copyout(ioes, io->array, totlen); 5236 free(ioes, M_TEMP); 5237 break; 5238 } 5239 5240 case DIOCXROLLBACK: { 5241 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5242 struct pfioc_trans_e *ioe, *ioes; 5243 size_t totlen; 5244 int i; 5245 5246 if (io->esize != sizeof(*ioe)) { 5247 error = ENODEV; 5248 break; 5249 } 5250 if (io->size < 0 || 5251 io->size > pf_ioctl_maxcount || 5252 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5253 error = EINVAL; 5254 break; 5255 } 5256 totlen = sizeof(struct pfioc_trans_e) * io->size; 5257 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5258 M_TEMP, M_WAITOK); 5259 error = copyin(io->array, ioes, totlen); 5260 if (error) { 5261 free(ioes, M_TEMP); 5262 break; 5263 } 5264 PF_RULES_WLOCK(); 5265 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5266 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 5267 switch (ioe->rs_num) { 5268 case PF_RULESET_ETH: 5269 if ((error = pf_rollback_eth(ioe->ticket, 5270 ioe->anchor))) { 5271 PF_RULES_WUNLOCK(); 5272 free(ioes, M_TEMP); 5273 goto fail; /* really bad */ 5274 } 5275 break; 5276 #ifdef ALTQ 5277 case PF_RULESET_ALTQ: 5278 if (ioe->anchor[0]) { 5279 PF_RULES_WUNLOCK(); 5280 free(ioes, M_TEMP); 5281 error = EINVAL; 5282 goto fail; 5283 } 5284 if ((error = pf_rollback_altq(ioe->ticket))) { 5285 PF_RULES_WUNLOCK(); 5286 free(ioes, M_TEMP); 5287 goto fail; /* really bad */ 5288 } 5289 break; 5290 #endif /* ALTQ */ 5291
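/* The table and default rule rollbacks below mirror the corresponding DIOCXBEGIN cases, discarding each inactive ruleset by its ticket. */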
case PF_RULESET_TABLE: 5292 { 5293 struct pfr_table table; 5294 5295 bzero(&table, sizeof(table)); 5296 strlcpy(table.pfrt_anchor, ioe->anchor, 5297 sizeof(table.pfrt_anchor)); 5298 if ((error = pfr_ina_rollback(&table, 5299 ioe->ticket, NULL, 0))) { 5300 PF_RULES_WUNLOCK(); 5301 free(ioes, M_TEMP); 5302 goto fail; /* really bad */ 5303 } 5304 break; 5305 } 5306 default: 5307 if ((error = pf_rollback_rules(ioe->ticket, 5308 ioe->rs_num, ioe->anchor))) { 5309 PF_RULES_WUNLOCK(); 5310 free(ioes, M_TEMP); 5311 goto fail; /* really bad */ 5312 } 5313 break; 5314 } 5315 } 5316 PF_RULES_WUNLOCK(); 5317 free(ioes, M_TEMP); 5318 break; 5319 } 5320 5321 case DIOCXCOMMIT: { 5322 struct pfioc_trans *io = (struct pfioc_trans *)addr; 5323 struct pfioc_trans_e *ioe, *ioes; 5324 struct pf_kruleset *rs; 5325 struct pf_keth_ruleset *ers; 5326 size_t totlen; 5327 int i; 5328 5329 if (io->esize != sizeof(*ioe)) { 5330 error = ENODEV; 5331 break; 5332 } 5333 5334 if (io->size < 0 || 5335 io->size > pf_ioctl_maxcount || 5336 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5337 error = EINVAL; 5338 break; 5339 } 5340 5341 totlen = sizeof(struct pfioc_trans_e) * io->size; 5342 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5343 M_TEMP, M_WAITOK); 5344 error = copyin(io->array, ioes, totlen); 5345 if (error) { 5346 free(ioes, M_TEMP); 5347 break; 5348 } 5349 PF_RULES_WLOCK(); 5350 /* First make sure everything will succeed. */ 5351 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5352 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5353 switch (ioe->rs_num) { 5354 case PF_RULESET_ETH: 5355 ers = pf_find_keth_ruleset(ioe->anchor); 5356 if (ers == NULL || ioe->ticket == 0 || 5357 ioe->ticket != ers->inactive.ticket) { 5358 PF_RULES_WUNLOCK(); 5359 free(ioes, M_TEMP); 5360 error = EINVAL; 5361 goto fail; 5362 } 5363 break; 5364 #ifdef ALTQ 5365 case PF_RULESET_ALTQ: 5366 if (ioe->anchor[0]) { 5367 PF_RULES_WUNLOCK(); 5368 free(ioes, M_TEMP); 5369 error = EINVAL; 5370 goto fail; 5371 } 5372 if (!V_altqs_inactive_open || ioe->ticket != 5373 V_ticket_altqs_inactive) { 5374 PF_RULES_WUNLOCK(); 5375 free(ioes, M_TEMP); 5376 error = EBUSY; 5377 goto fail; 5378 } 5379 break; 5380 #endif /* ALTQ */ 5381 case PF_RULESET_TABLE: 5382 rs = pf_find_kruleset(ioe->anchor); 5383 if (rs == NULL || !rs->topen || ioe->ticket != 5384 rs->tticket) { 5385 PF_RULES_WUNLOCK(); 5386 free(ioes, M_TEMP); 5387 error = EBUSY; 5388 goto fail; 5389 } 5390 break; 5391 default: 5392 if (ioe->rs_num < 0 || ioe->rs_num >= 5393 PF_RULESET_MAX) { 5394 PF_RULES_WUNLOCK(); 5395 free(ioes, M_TEMP); 5396 error = EINVAL; 5397 goto fail; 5398 } 5399 rs = pf_find_kruleset(ioe->anchor); 5400 if (rs == NULL || 5401 !rs->rules[ioe->rs_num].inactive.open || 5402 rs->rules[ioe->rs_num].inactive.ticket != 5403 ioe->ticket) { 5404 PF_RULES_WUNLOCK(); 5405 free(ioes, M_TEMP); 5406 error = EBUSY; 5407 goto fail; 5408 } 5409 break; 5410 } 5411 } 5412 /* Now do the commit - no errors should happen here.
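 * A failure at this point would leave the transaction partially applied, which is why the error paths below are marked "really bad".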
*/ 5413 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5414 switch (ioe->rs_num) { 5415 case PF_RULESET_ETH: 5416 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5417 PF_RULES_WUNLOCK(); 5418 free(ioes, M_TEMP); 5419 goto fail; /* really bad */ 5420 } 5421 break; 5422 #ifdef ALTQ 5423 case PF_RULESET_ALTQ: 5424 if ((error = pf_commit_altq(ioe->ticket))) { 5425 PF_RULES_WUNLOCK(); 5426 free(ioes, M_TEMP); 5427 goto fail; /* really bad */ 5428 } 5429 break; 5430 #endif /* ALTQ */ 5431 case PF_RULESET_TABLE: 5432 { 5433 struct pfr_table table; 5434 5435 bzero(&table, sizeof(table)); 5436 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5437 sizeof(table.pfrt_anchor)); 5438 if ((error = pfr_ina_commit(&table, 5439 ioe->ticket, NULL, NULL, 0))) { 5440 PF_RULES_WUNLOCK(); 5441 free(ioes, M_TEMP); 5442 goto fail; /* really bad */ 5443 } 5444 break; 5445 } 5446 default: 5447 if ((error = pf_commit_rules(ioe->ticket, 5448 ioe->rs_num, ioe->anchor))) { 5449 PF_RULES_WUNLOCK(); 5450 free(ioes, M_TEMP); 5451 goto fail; /* really bad */ 5452 } 5453 break; 5454 } 5455 } 5456 PF_RULES_WUNLOCK(); 5457 5458 /* Only hook into Ethernet traffic if we've got rules for it. */ 5459 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5460 hook_pf_eth(); 5461 else 5462 dehook_pf_eth(); 5463 5464 free(ioes, M_TEMP); 5465 break; 5466 } 5467 5468 case DIOCGETSRCNODES: { 5469 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5470 struct pf_srchash *sh; 5471 struct pf_ksrc_node *n; 5472 struct pf_src_node *p, *pstore; 5473 uint32_t i, nr = 0; 5474 5475 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5476 i++, sh++) { 5477 PF_HASHROW_LOCK(sh); 5478 LIST_FOREACH(n, &sh->nodes, entry) 5479 nr++; 5480 PF_HASHROW_UNLOCK(sh); 5481 } 5482 5483 psn->psn_len = min(psn->psn_len, 5484 sizeof(struct pf_src_node) * nr); 5485 5486 if (psn->psn_len == 0) { 5487 psn->psn_len = sizeof(struct pf_src_node) * nr; 5488 break; 5489 } 5490 5491 nr = 0; 5492 5493 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5494 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5495 i++, sh++) { 5496 PF_HASHROW_LOCK(sh); 5497 LIST_FOREACH(n, &sh->nodes, entry) { 5498 5499 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5500 break; 5501 5502 pf_src_node_copy(n, p); 5503 5504 p++; 5505 nr++; 5506 } 5507 PF_HASHROW_UNLOCK(sh); 5508 } 5509 error = copyout(pstore, psn->psn_src_nodes, 5510 sizeof(struct pf_src_node) * nr); 5511 if (error) { 5512 free(pstore, M_TEMP); 5513 break; 5514 } 5515 psn->psn_len = sizeof(struct pf_src_node) * nr; 5516 free(pstore, M_TEMP); 5517 break; 5518 } 5519 5520 case DIOCCLRSRCNODES: { 5521 pf_clear_srcnodes(NULL); 5522 pf_purge_expired_src_nodes(); 5523 break; 5524 } 5525 5526 case DIOCKILLSRCNODES: 5527 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5528 break; 5529 5530 #ifdef COMPAT_FREEBSD13 5531 case DIOCKEEPCOUNTERS_FREEBSD13: 5532 #endif 5533 case DIOCKEEPCOUNTERS: 5534 error = pf_keepcounters((struct pfioc_nv *)addr); 5535 break; 5536 5537 case DIOCGETSYNCOOKIES: 5538 error = pf_get_syncookies((struct pfioc_nv *)addr); 5539 break; 5540 5541 case DIOCSETSYNCOOKIES: 5542 error = pf_set_syncookies((struct pfioc_nv *)addr); 5543 break; 5544 5545 case DIOCSETHOSTID: { 5546 u_int32_t *hostid = (u_int32_t *)addr; 5547 5548 PF_RULES_WLOCK(); 5549 if (*hostid == 0) 5550 V_pf_status.hostid = arc4random(); 5551 else 5552 V_pf_status.hostid = *hostid; 5553 PF_RULES_WUNLOCK(); 5554 break; 5555 } 5556 5557 case DIOCOSFPFLUSH: 5558 PF_RULES_WLOCK(); 5559 pf_osfp_flush(); 5560
PF_RULES_WUNLOCK(); 5561 break; 5562 5563 case DIOCIGETIFACES: { 5564 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5565 struct pfi_kif *ifstore; 5566 size_t bufsiz; 5567 5568 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5569 error = ENODEV; 5570 break; 5571 } 5572 5573 if (io->pfiio_size < 0 || 5574 io->pfiio_size > pf_ioctl_maxcount || 5575 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5576 error = EINVAL; 5577 break; 5578 } 5579 5580 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5581 5582 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5583 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5584 M_TEMP, M_WAITOK | M_ZERO); 5585 5586 PF_RULES_RLOCK(); 5587 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5588 PF_RULES_RUNLOCK(); 5589 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5590 free(ifstore, M_TEMP); 5591 break; 5592 } 5593 5594 case DIOCSETIFFLAG: { 5595 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5596 5597 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5598 5599 PF_RULES_WLOCK(); 5600 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5601 PF_RULES_WUNLOCK(); 5602 break; 5603 } 5604 5605 case DIOCCLRIFFLAG: { 5606 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5607 5608 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; 5609 5610 PF_RULES_WLOCK(); 5611 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5612 PF_RULES_WUNLOCK(); 5613 break; 5614 } 5615 5616 default: 5617 error = ENODEV; 5618 break; 5619 } 5620 fail: 5621 if (sx_xlocked(&pf_ioctl_lock)) 5622 sx_xunlock(&pf_ioctl_lock); 5623 CURVNET_RESTORE(); 5624 5625 #undef ERROUT_IOCTL 5626 5627 return (error); 5628 } 5629 5630 void 5631 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st) 5632 { 5633 bzero(sp, sizeof(struct pfsync_state)); 5634 5635 /* copy from state key */ 5636 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5637 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5638 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5639 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5640 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5641 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5642 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5643 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5644 sp->proto = st->key[PF_SK_WIRE]->proto; 5645 sp->af = st->key[PF_SK_WIRE]->af; 5646 5647 /* copy from state */ 5648 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5649 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5650 sp->creation = htonl(time_uptime - st->creation); 5651 sp->expire = pf_state_expires(st); 5652 if (sp->expire <= time_uptime) 5653 sp->expire = htonl(0); 5654 else 5655 sp->expire = htonl(sp->expire - time_uptime); 5656 5657 sp->direction = st->direction; 5658 sp->log = st->log; 5659 sp->timeout = st->timeout; 5660 sp->state_flags = st->state_flags; 5661 if (st->src_node) 5662 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5663 if (st->nat_src_node) 5664 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5665 5666 sp->id = st->id; 5667 sp->creatorid = st->creatorid; 5668 pf_state_peer_hton(&st->src, &sp->src); 5669 pf_state_peer_hton(&st->dst, &sp->dst); 5670 5671 if (st->rule.ptr == NULL) 5672 sp->rule = htonl(-1); 5673 else 5674 sp->rule = htonl(st->rule.ptr->nr); 5675 if (st->anchor.ptr == NULL) 5676 sp->anchor = htonl(-1); 5677 else 5678 sp->anchor = htonl(st->anchor.ptr->nr); 5679 if (st->nat_rule.ptr == NULL) 
5680 sp->nat_rule = htonl(-1); 5681 else 5682 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5683 5684 pf_state_counter_hton(st->packets[0], sp->packets[0]); 5685 pf_state_counter_hton(st->packets[1], sp->packets[1]); 5686 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 5687 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 5688 } 5689 5690 void 5691 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5692 { 5693 bzero(sp, sizeof(*sp)); 5694 5695 sp->version = PF_STATE_VERSION; 5696 5697 /* copy from state key */ 5698 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5699 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5700 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5701 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5702 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5703 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5704 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5705 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5706 sp->proto = st->key[PF_SK_WIRE]->proto; 5707 sp->af = st->key[PF_SK_WIRE]->af; 5708 5709 /* copy from state */ 5710 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5711 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5712 sizeof(sp->orig_ifname)); 5713 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5714 sp->creation = htonl(time_uptime - st->creation); 5715 sp->expire = pf_state_expires(st); 5716 if (sp->expire <= time_uptime) 5717 sp->expire = htonl(0); 5718 else 5719 sp->expire = htonl(sp->expire - time_uptime); 5720 5721 sp->direction = st->direction; 5722 sp->log = st->log; 5723 sp->timeout = st->timeout; 5724 sp->state_flags = st->state_flags; 5725 if (st->src_node) 5726 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5727 if (st->nat_src_node) 5728 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5729 5730 sp->id = st->id; 5731 sp->creatorid = st->creatorid; 5732 pf_state_peer_hton(&st->src, &sp->src); 5733 pf_state_peer_hton(&st->dst, &sp->dst); 5734 5735 if (st->rule.ptr == NULL) 5736 sp->rule = htonl(-1); 5737 else 5738 sp->rule = htonl(st->rule.ptr->nr); 5739 if (st->anchor.ptr == NULL) 5740 sp->anchor = htonl(-1); 5741 else 5742 sp->anchor = htonl(st->anchor.ptr->nr); 5743 if (st->nat_rule.ptr == NULL) 5744 sp->nat_rule = htonl(-1); 5745 else 5746 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5747 5748 sp->packets[0] = st->packets[0]; 5749 sp->packets[1] = st->packets[1]; 5750 sp->bytes[0] = st->bytes[0]; 5751 sp->bytes[1] = st->bytes[1]; 5752 } 5753 5754 static void 5755 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5756 { 5757 struct pfr_ktable *kt; 5758 5759 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5760 5761 kt = aw->p.tbl; 5762 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5763 kt = kt->pfrkt_root; 5764 aw->p.tbl = NULL; 5765 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
5766 kt->pfrkt_cnt : -1; 5767 } 5768 5769 static int 5770 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5771 size_t number, char **names) 5772 { 5773 nvlist_t *nvc; 5774 5775 nvc = nvlist_create(0); 5776 if (nvc == NULL) 5777 return (ENOMEM); 5778 5779 for (int i = 0; i < number; i++) { 5780 nvlist_append_number_array(nvc, "counters", 5781 counter_u64_fetch(counters[i])); 5782 nvlist_append_string_array(nvc, "names", 5783 names[i]); 5784 nvlist_append_number_array(nvc, "ids", 5785 i); 5786 } 5787 nvlist_add_nvlist(nvl, name, nvc); 5788 nvlist_destroy(nvc); 5789 5790 return (0); 5791 } 5792 5793 static int 5794 pf_getstatus(struct pfioc_nv *nv) 5795 { 5796 nvlist_t *nvl = NULL, *nvc = NULL; 5797 void *nvlpacked = NULL; 5798 int error; 5799 struct pf_status s; 5800 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5801 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5802 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5803 PF_RULES_RLOCK_TRACKER; 5804 5805 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5806 5807 PF_RULES_RLOCK(); 5808 5809 nvl = nvlist_create(0); 5810 if (nvl == NULL) 5811 ERROUT(ENOMEM); 5812 5813 nvlist_add_bool(nvl, "running", V_pf_status.running); 5814 nvlist_add_number(nvl, "since", V_pf_status.since); 5815 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5816 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5817 nvlist_add_number(nvl, "states", V_pf_status.states); 5818 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5819 5820 /* counters */ 5821 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5822 PFRES_MAX, pf_reasons); 5823 if (error != 0) 5824 ERROUT(error); 5825 5826 /* lcounters */ 5827 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5828 KLCNT_MAX, pf_lcounter); 5829 if (error != 0) 5830 ERROUT(error); 5831 5832 /* fcounters */ 5833 nvc = nvlist_create(0); 5834 if (nvc == NULL) 5835 ERROUT(ENOMEM); 5836 5837 for (int i = 0; i < FCNT_MAX; i++) { 5838 nvlist_append_number_array(nvc, "counters", 5839 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5840 nvlist_append_string_array(nvc, "names", 5841 pf_fcounter[i]); 5842 nvlist_append_number_array(nvc, "ids", 5843 i); 5844 } 5845 nvlist_add_nvlist(nvl, "fcounters", nvc); 5846 nvlist_destroy(nvc); 5847 nvc = NULL; 5848 5849 /* scounters */ 5850 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5851 SCNT_MAX, pf_fcounter); 5852 if (error != 0) 5853 ERROUT(error); 5854 5855 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5856 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5857 PF_MD5_DIGEST_LENGTH); 5858 5859 pfi_update_status(V_pf_status.ifname, &s); 5860 5861 /* pcounters / bcounters */ 5862 for (int i = 0; i < 2; i++) { 5863 for (int j = 0; j < 2; j++) { 5864 for (int k = 0; k < 2; k++) { 5865 nvlist_append_number_array(nvl, "pcounters", 5866 s.pcounters[i][j][k]); 5867 } 5868 nvlist_append_number_array(nvl, "bcounters", 5869 s.bcounters[i][j]); 5870 } 5871 } 5872 5873 nvlpacked = nvlist_pack(nvl, &nv->len); 5874 if (nvlpacked == NULL) 5875 ERROUT(ENOMEM); 5876 5877 if (nv->size == 0) 5878 ERROUT(0); 5879 else if (nv->size < nv->len) 5880 ERROUT(ENOSPC); 5881 5882 PF_RULES_RUNLOCK(); 5883 error = copyout(nvlpacked, nv->data, nv->len); 5884 goto done; 5885 5886 #undef ERROUT 5887 errout: 5888 PF_RULES_RUNLOCK(); 5889 done: 5890 free(nvlpacked, M_NVLIST); 5891 nvlist_destroy(nvc); 5892 nvlist_destroy(nvl); 5893 5894 return (error); 5895 } 5896 5897 /* 5898 * XXX - Check for version mismatch!!!
5899 */ 5900 static void 5901 pf_clear_all_states(void) 5902 { 5903 struct pf_kstate *s; 5904 u_int i; 5905 5906 for (i = 0; i <= pf_hashmask; i++) { 5907 struct pf_idhash *ih = &V_pf_idhash[i]; 5908 relock: 5909 PF_HASHROW_LOCK(ih); 5910 LIST_FOREACH(s, &ih->states, entry) { 5911 s->timeout = PFTM_PURGE; 5912 /* Don't send out individual delete messages. */ 5913 s->state_flags |= PFSTATE_NOSYNC; 5914 pf_unlink_state(s); 5915 goto relock; 5916 } 5917 PF_HASHROW_UNLOCK(ih); 5918 } 5919 } 5920 5921 static int 5922 pf_clear_tables(void) 5923 { 5924 struct pfioc_table io; 5925 int error; 5926 5927 bzero(&io, sizeof(io)); 5928 5929 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5930 io.pfrio_flags); 5931 5932 return (error); 5933 } 5934 5935 static void 5936 pf_clear_srcnodes(struct pf_ksrc_node *n) 5937 { 5938 struct pf_kstate *s; 5939 int i; 5940 5941 for (i = 0; i <= pf_hashmask; i++) { 5942 struct pf_idhash *ih = &V_pf_idhash[i]; 5943 5944 PF_HASHROW_LOCK(ih); 5945 LIST_FOREACH(s, &ih->states, entry) { 5946 if (n == NULL || n == s->src_node) 5947 s->src_node = NULL; 5948 if (n == NULL || n == s->nat_src_node) 5949 s->nat_src_node = NULL; 5950 } 5951 PF_HASHROW_UNLOCK(ih); 5952 } 5953 5954 if (n == NULL) { 5955 struct pf_srchash *sh; 5956 5957 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5958 i++, sh++) { 5959 PF_HASHROW_LOCK(sh); 5960 LIST_FOREACH(n, &sh->nodes, entry) { 5961 n->expire = 1; 5962 n->states = 0; 5963 } 5964 PF_HASHROW_UNLOCK(sh); 5965 } 5966 } else { 5967 /* XXX: hash slot should already be locked here. */ 5968 n->expire = 1; 5969 n->states = 0; 5970 } 5971 } 5972 5973 static void 5974 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5975 { 5976 struct pf_ksrc_node_list kill; 5977 5978 LIST_INIT(&kill); 5979 for (int i = 0; i <= pf_srchashmask; i++) { 5980 struct pf_srchash *sh = &V_pf_srchash[i]; 5981 struct pf_ksrc_node *sn, *tmp; 5982 5983 PF_HASHROW_LOCK(sh); 5984 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5985 if (PF_MATCHA(psnk->psnk_src.neg, 5986 &psnk->psnk_src.addr.v.a.addr, 5987 &psnk->psnk_src.addr.v.a.mask, 5988 &sn->addr, sn->af) && 5989 PF_MATCHA(psnk->psnk_dst.neg, 5990 &psnk->psnk_dst.addr.v.a.addr, 5991 &psnk->psnk_dst.addr.v.a.mask, 5992 &sn->raddr, sn->af)) { 5993 pf_unlink_src_node(sn); 5994 LIST_INSERT_HEAD(&kill, sn, entry); 5995 sn->expire = 1; 5996 } 5997 PF_HASHROW_UNLOCK(sh); 5998 } 5999 6000 for (int i = 0; i <= pf_hashmask; i++) { 6001 struct pf_idhash *ih = &V_pf_idhash[i]; 6002 struct pf_kstate *s; 6003 6004 PF_HASHROW_LOCK(ih); 6005 LIST_FOREACH(s, &ih->states, entry) { 6006 if (s->src_node && s->src_node->expire == 1) 6007 s->src_node = NULL; 6008 if (s->nat_src_node && s->nat_src_node->expire == 1) 6009 s->nat_src_node = NULL; 6010 } 6011 PF_HASHROW_UNLOCK(ih); 6012 } 6013 6014 psnk->psnk_killed = pf_free_src_nodes(&kill); 6015 } 6016 6017 static int 6018 pf_keepcounters(struct pfioc_nv *nv) 6019 { 6020 nvlist_t *nvl = NULL; 6021 void *nvlpacked = NULL; 6022 int error = 0; 6023 6024 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6025 6026 if (nv->len > pf_ioctl_maxcount) 6027 ERROUT(ENOMEM); 6028 6029 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6030 if (nvlpacked == NULL) 6031 ERROUT(ENOMEM); 6032 6033 error = copyin(nv->data, nvlpacked, nv->len); 6034 if (error) 6035 ERROUT(error); 6036 6037 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6038 if (nvl == NULL) 6039 ERROUT(EBADMSG); 6040 6041 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 6042 ERROUT(EBADMSG); 6043 6044 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 6045 6046 on_error: 6047 nvlist_destroy(nvl); 6048 free(nvlpacked, M_NVLIST); 6049 return (error); 6050 } 6051 6052 static unsigned int 6053 pf_clear_states(const struct pf_kstate_kill *kill) 6054 { 6055 struct pf_state_key_cmp match_key; 6056 struct pf_kstate *s; 6057 struct pfi_kkif *kif; 6058 int idx; 6059 unsigned int killed = 0, dir; 6060 6061 for (unsigned int i = 0; i <= pf_hashmask; i++) { 6062 struct pf_idhash *ih = &V_pf_idhash[i]; 6063 6064 relock_DIOCCLRSTATES: 6065 PF_HASHROW_LOCK(ih); 6066 LIST_FOREACH(s, &ih->states, entry) { 6067 /* For floating states look at the original kif. */ 6068 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 6069 6070 if (kill->psk_ifname[0] && 6071 strcmp(kill->psk_ifname, 6072 kif->pfik_name)) 6073 continue; 6074 6075 if (kill->psk_kill_match) { 6076 bzero(&match_key, sizeof(match_key)); 6077 6078 if (s->direction == PF_OUT) { 6079 dir = PF_IN; 6080 idx = PF_SK_STACK; 6081 } else { 6082 dir = PF_OUT; 6083 idx = PF_SK_WIRE; 6084 } 6085 6086 match_key.af = s->key[idx]->af; 6087 match_key.proto = s->key[idx]->proto; 6088 PF_ACPY(&match_key.addr[0], 6089 &s->key[idx]->addr[1], match_key.af); 6090 match_key.port[0] = s->key[idx]->port[1]; 6091 PF_ACPY(&match_key.addr[1], 6092 &s->key[idx]->addr[0], match_key.af); 6093 match_key.port[1] = s->key[idx]->port[0]; 6094 } 6095 6096 /* 6097 * Don't send out individual 6098 * delete messages. 6099 */ 6100 s->state_flags |= PFSTATE_NOSYNC; 6101 pf_unlink_state(s); 6102 killed++; 6103 6104 if (kill->psk_kill_match) 6105 killed += pf_kill_matching_state(&match_key, 6106 dir); 6107 6108 goto relock_DIOCCLRSTATES; 6109 } 6110 PF_HASHROW_UNLOCK(ih); 6111 } 6112 6113 if (V_pfsync_clear_states_ptr != NULL) 6114 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 6115 6116 return (killed); 6117 } 6118 6119 static void 6120 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 6121 { 6122 struct pf_kstate *s; 6123 6124 if (kill->psk_pfcmp.id) { 6125 if (kill->psk_pfcmp.creatorid == 0) 6126 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 6127 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 6128 kill->psk_pfcmp.creatorid))) { 6129 pf_unlink_state(s); 6130 *killed = 1; 6131 } 6132 return; 6133 } 6134 6135 for (unsigned int i = 0; i <= pf_hashmask; i++) 6136 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 6137 6138 return; 6139 } 6140 6141 static int 6142 pf_killstates_nv(struct pfioc_nv *nv) 6143 { 6144 struct pf_kstate_kill kill; 6145 nvlist_t *nvl = NULL; 6146 void *nvlpacked = NULL; 6147 int error = 0; 6148 unsigned int killed = 0; 6149 6150 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6151 6152 if (nv->len > pf_ioctl_maxcount) 6153 ERROUT(ENOMEM); 6154 6155 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6156 if (nvlpacked == NULL) 6157 ERROUT(ENOMEM); 6158 6159 error = copyin(nv->data, nvlpacked, nv->len); 6160 if (error) 6161 ERROUT(error); 6162 6163 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6164 if (nvl == NULL) 6165 ERROUT(EBADMSG); 6166 6167 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6168 if (error) 6169 ERROUT(error); 6170 6171 pf_killstates(&kill, &killed); 6172 6173 free(nvlpacked, M_NVLIST); 6174 nvlpacked = NULL; 6175 nvlist_destroy(nvl); 6176 nvl = nvlist_create(0); 6177 if (nvl == NULL) 6178 ERROUT(ENOMEM); 6179 6180 nvlist_add_number(nvl, "killed", killed); 6181 6182 nvlpacked = nvlist_pack(nvl, &nv->len); 6183 if 
(nvlpacked == NULL) 6184 ERROUT(ENOMEM); 6185 6186 if (nv->size == 0) 6187 ERROUT(0); 6188 else if (nv->size < nv->len) 6189 ERROUT(ENOSPC); 6190 6191 error = copyout(nvlpacked, nv->data, nv->len); 6192 6193 on_error: 6194 nvlist_destroy(nvl); 6195 free(nvlpacked, M_NVLIST); 6196 return (error); 6197 } 6198 6199 static int 6200 pf_clearstates_nv(struct pfioc_nv *nv) 6201 { 6202 struct pf_kstate_kill kill; 6203 nvlist_t *nvl = NULL; 6204 void *nvlpacked = NULL; 6205 int error = 0; 6206 unsigned int killed; 6207 6208 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 6209 6210 if (nv->len > pf_ioctl_maxcount) 6211 ERROUT(ENOMEM); 6212 6213 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6214 if (nvlpacked == NULL) 6215 ERROUT(ENOMEM); 6216 6217 error = copyin(nv->data, nvlpacked, nv->len); 6218 if (error) 6219 ERROUT(error); 6220 6221 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6222 if (nvl == NULL) 6223 ERROUT(EBADMSG); 6224 6225 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 6226 if (error) 6227 ERROUT(error); 6228 6229 killed = pf_clear_states(&kill); 6230 6231 free(nvlpacked, M_NVLIST); 6232 nvlpacked = NULL; 6233 nvlist_destroy(nvl); 6234 nvl = nvlist_create(0); 6235 if (nvl == NULL) 6236 ERROUT(ENOMEM); 6237 6238 nvlist_add_number(nvl, "killed", killed); 6239 6240 nvlpacked = nvlist_pack(nvl, &nv->len); 6241 if (nvlpacked == NULL) 6242 ERROUT(ENOMEM); 6243 6244 if (nv->size == 0) 6245 ERROUT(0); 6246 else if (nv->size < nv->len) 6247 ERROUT(ENOSPC); 6248 6249 error = copyout(nvlpacked, nv->data, nv->len); 6250 6251 #undef ERROUT 6252 on_error: 6253 nvlist_destroy(nvl); 6254 free(nvlpacked, M_NVLIST); 6255 return (error); 6256 } 6257 6258 static int 6259 pf_getstate(struct pfioc_nv *nv) 6260 { 6261 nvlist_t *nvl = NULL, *nvls; 6262 void *nvlpacked = NULL; 6263 struct pf_kstate *s = NULL; 6264 int error = 0; 6265 uint64_t id, creatorid; 6266 6267 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 6268 6269 if (nv->len > pf_ioctl_maxcount) 6270 ERROUT(ENOMEM); 6271 6272 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 6273 if (nvlpacked == NULL) 6274 ERROUT(ENOMEM); 6275 6276 error = copyin(nv->data, nvlpacked, nv->len); 6277 if (error) 6278 ERROUT(error); 6279 6280 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 6281 if (nvl == NULL) 6282 ERROUT(EBADMSG); 6283 6284 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 6285 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 6286 6287 s = pf_find_state_byid(id, creatorid); 6288 if (s == NULL) 6289 ERROUT(ENOENT); 6290 6291 free(nvlpacked, M_NVLIST); 6292 nvlpacked = NULL; 6293 nvlist_destroy(nvl); 6294 nvl = nvlist_create(0); 6295 if (nvl == NULL) 6296 ERROUT(ENOMEM); 6297 6298 nvls = pf_state_to_nvstate(s); 6299 if (nvls == NULL) 6300 ERROUT(ENOMEM); 6301 6302 nvlist_add_nvlist(nvl, "state", nvls); 6303 nvlist_destroy(nvls); 6304 6305 nvlpacked = nvlist_pack(nvl, &nv->len); 6306 if (nvlpacked == NULL) 6307 ERROUT(ENOMEM); 6308 6309 if (nv->size == 0) 6310 ERROUT(0); 6311 else if (nv->size < nv->len) 6312 ERROUT(ENOSPC); 6313 6314 error = copyout(nvlpacked, nv->data, nv->len); 6315 6316 #undef ERROUT 6317 errout: 6318 if (s != NULL) 6319 PF_STATE_UNLOCK(s); 6320 free(nvlpacked, M_NVLIST); 6321 nvlist_destroy(nvl); 6322 return (error); 6323 } 6324 6325 /* 6326 * XXX - Check for version mismatch!!! 6327 */ 6328 6329 /* 6330 * Duplicate pfctl -Fa operation to get rid of as much as we can.
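 * (i.e. flush rules, Ethernet rules, tables, ALTQ queues, states and source nodes)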
6331 */ 6332 static int 6333 shutdown_pf(void) 6334 { 6335 int error = 0; 6336 u_int32_t t[5]; 6337 char nn = '\0'; 6338 6339 do { 6340 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6341 != 0) { 6342 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6343 break; 6344 } 6345 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6346 != 0) { 6347 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6348 break; /* XXX: rollback? */ 6349 } 6350 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6351 != 0) { 6352 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6353 break; /* XXX: rollback? */ 6354 } 6355 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6356 != 0) { 6357 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6358 break; /* XXX: rollback? */ 6359 } 6360 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6361 != 0) { 6362 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6363 break; /* XXX: rollback? */ 6364 } 6365 6366 /* XXX: these should always succeed here */ 6367 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6368 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6369 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6370 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6371 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6372 6373 if ((error = pf_clear_tables()) != 0) 6374 break; 6375 6376 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6377 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6378 break; 6379 } 6380 pf_commit_eth(t[0], &nn); 6381 6382 #ifdef ALTQ 6383 if ((error = pf_begin_altq(&t[0])) != 0) { 6384 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6385 break; 6386 } 6387 pf_commit_altq(t[0]); 6388 #endif 6389 6390 pf_clear_all_states(); 6391 6392 pf_clear_srcnodes(NULL); 6393 6394 /* status does not use malloced mem so no need to cleanup */ 6395 /* fingerprints and interfaces have their own cleanup code */ 6396 } while(0); 6397 6398 return (error); 6399 } 6400 6401 static pfil_return_t 6402 pf_check_return(int chk, struct mbuf **m) 6403 { 6404 6405 switch (chk) { 6406 case PF_PASS: 6407 if (*m == NULL) 6408 return (PFIL_CONSUMED); 6409 else 6410 return (PFIL_PASS); 6411 break; 6412 default: 6413 if (*m != NULL) { 6414 m_freem(*m); 6415 *m = NULL; 6416 } 6417 return (PFIL_DROPPED); 6418 } 6419 } 6420 6421 static pfil_return_t 6422 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6423 void *ruleset __unused, struct inpcb *inp) 6424 { 6425 int chk; 6426 6427 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6428 6429 return (pf_check_return(chk, m)); 6430 } 6431 6432 static pfil_return_t 6433 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6434 void *ruleset __unused, struct inpcb *inp) 6435 { 6436 int chk; 6437 6438 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6439 6440 return (pf_check_return(chk, m)); 6441 } 6442 6443 #ifdef INET 6444 static pfil_return_t 6445 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6446 void *ruleset __unused, struct inpcb *inp) 6447 { 6448 int chk; 6449 6450 chk = pf_test(PF_IN, flags, ifp, m, inp); 6451 6452 return (pf_check_return(chk, m)); 6453 } 6454 6455 static pfil_return_t 6456 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6457 void *ruleset __unused, struct inpcb *inp) 6458 { 6459 int chk; 6460 6461 chk = pf_test(PF_OUT, flags, ifp, m, inp); 6462 6463 return (pf_check_return(chk, m)); 6464 } 6465 #endif 6466 6467 #ifdef INET6 6468 static pfil_return_t 6469 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6470 void *ruleset __unused, struct 
#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * For loopback traffic IPv6 uses the real interface in order to
	 * support scoped addresses.  To support stateful filtering we have
	 * to change this to lo0, as is the case with IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (V_pf_pfil_eth_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

	pha.pa_type = PFIL_TYPE_ETHERNET;
	pha.pa_func = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	V_pf_pfil_eth_hooked = 1;
}
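
/*
 * Register pf's IPv4 and IPv6 pfil(9) hooks and link them into the
 * inet and inet6 heads.  Idempotent: a second call is a no-op until
 * dehook_pf() has removed the hooks.
 */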
static void
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret __diagused;

	if (V_pf_pfil_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check6_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out6";
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	V_pf_pfil_hooked = 1;
}

static void
dehook_pf_eth(void)
{

	if (V_pf_pfil_eth_hooked == 0)
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	V_pf_pfil_eth_hooked = 0;
}

static void
dehook_pf(void)
{

	if (V_pf_pfil_hooked == 0)
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	V_pf_pfil_hooked = 0;
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE);
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}
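
/*
 * Per-vnet counterpart of pf_unload(): unhook from pfil(9), flush all
 * rules, states and tables via shutdown_pf(), stop the software
 * interrupt handler and release the remaining per-vnet allocations.
 */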
static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	/* Make sure we've cleaned up ethernet rules before we continue. */
	NET_EPOCH_DRAIN_CALLBACKS();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	/*
	 * There are known pf rule leaks when running the test suite.
	 */
#ifdef notyet
	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);
#endif

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
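
/*
 * Module glue.  Loading runs through the MOD_LOAD event above;
 * unloading is deliberately left to SYSUNINIT(pf_unload) so that it
 * happens only after each vnet_pf_uninit() has run.
 */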
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);