/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROVIDER_DECLARE(pf);
SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static u_int32_t	 pf_qname2qid(char *);
static void		 pf_qid_unref(u_int32_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
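	/* Tag values are 1-based: [1, TAGID_MAX] (see tagname2tag()) */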
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, char *);
static u_int16_t	 pf_tagname2tag(char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static int		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstates(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

extern u_long	pf_ioctl_maxcount;

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < 2; i++) {
		V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
		V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
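	/*
	 * Note: PFTM_ADAPTIVE_START/END are state-count thresholds, not
	 * timeouts; between the two the other timeout values are scaled
	 * down linearly as the state table fills (adaptive timeouts).
	 */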

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < LCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		V_pf_status.fcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR(). */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

#ifdef ALTQ
static u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&V_pf_qids, (u_int16_t)qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been
	 * overridden by a new one; in that case just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
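
/*
 * Hash the fields that identify a rule into the given MD5 context.  The
 * digest serves two purposes: it forms the ruleset checksum exported to
 * pfsync, and, when keep_counters is set, it is used to match up
 * equivalent rules across a commit (see pf_krule_compare()).
 */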
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static bool
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{
	MD5_CTX		ctx[2];
	u_int8_t	digest[2][PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx[0]);
	MD5Init(&ctx[1]);
	pf_hash_rule(&ctx[0], a);
	pf_hash_rule(&ctx[1], b);
	MD5Final(digest[0], &ctx[0]);
	MD5Final(digest[1], &ctx[1]);

	return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
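	/*
	 * Old and new rules are matched by content (MD5 digest of their
	 * identifying fields) rather than by position, so an unchanged
	 * rule keeps its counters even when the ruleset around it changed.
	 * The scan is quadratic in the ruleset size, but only runs when
	 * keep_counters is enabled.
	 */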
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) &&
			    !pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				counter_u64_add(rule->evaluations,
				    counter_u64_fetch(tail->evaluations));
				counter_u64_add(rule->packets[0],
				    counter_u64_fetch(tail->packets[0]));
				counter_u64_add(rule->packets[1],
				    counter_u64_fetch(tail->packets[1]));
				counter_u64_add(rule->bytes[0],
				    counter_u64_fetch(tail->bytes[0]));
				counter_u64_add(rule->bytes[1],
				    counter_u64_fetch(tail->bytes[1]));
			}
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
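
/*
 * Both pf_export_kaltq() and pf_import_kaltq() recognize a v0 caller
 * purely by the size of the ioctl argument, since struct pfioc_altq_v0
 * predates the embedded version field.
 */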
/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

void
pf_krule_free(struct pf_krule *rule)
{
	if (rule == NULL)
		return;

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}
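
/*
 * The pf_k* structures are the kernel-internal representations; these
 * conversion helpers translate between them and the ABI structures
 * exchanged with userland through the ioctl interface.
 */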
static void
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	strlcpy(kpool->ifname, pool->ifname, sizeof(kpool->ifname));
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static int
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bzero(kpool, sizeof(*kpool));

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;

	return (0);
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = counter_u64_fetch(krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = counter_u64_fetch(krule->packets[i]);
		rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bzero(krule, sizeof(*krule));

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	strlcpy(krule->label[0], rule->label, sizeof(rule->label));
	strlcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	strlcpy(krule->qname, rule->qname, sizeof(rule->qname));
	strlcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	strlcpy(krule->tagname, rule->tagname, sizeof(rule->tagname));
	strlcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));

	ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool);
	if (ret != 0)
		return (ret);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;
	krule->anchor_relative = rule->anchor_relative;
	krule->anchor_wildcard = rule->anchor_wildcard;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}

static int
pf_state_kill_to_kstate_kill(const struct pfioc_state_kill *psk,
    struct pf_kstate_kill *kill)
{
	bzero(kill, sizeof(*kill));

	bcopy(&psk->psk_pfcmp, &kill->psk_pfcmp, sizeof(kill->psk_pfcmp));
	kill->psk_af = psk->psk_af;
	kill->psk_proto = psk->psk_proto;
	bcopy(&psk->psk_src, &kill->psk_src, sizeof(kill->psk_src));
	bcopy(&psk->psk_dst, &kill->psk_dst, sizeof(kill->psk_dst));
	strlcpy(kill->psk_ifname, psk->psk_ifname, sizeof(kill->psk_ifname));
	strlcpy(kill->psk_label, psk->psk_label, sizeof(kill->psk_label));

	return (0);
}

static int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    struct thread *td)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*tail;
	struct pf_kpooladdr	*pa;
	struct pfi_kkif		*kif = NULL;
	int			 rs_num;
	int			 error = 0;

	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
		error = EINVAL;
		goto errout_unlocked;
	}
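
	/*
	 * The kif and counter allocations below use M_WAITOK and may
	 * sleep, so they must happen before PF_RULES_WLOCK() is taken.
	 */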

#define	ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	rule->evaluations = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = counter_u64_alloc(M_WAITOK);
		rule->bytes[i] = counter_u64_alloc(M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	rule->src_nodes = counter_u64_alloc(M_WAITOK);
	rule->cuid = td->td_ucred->cr_ruid;
	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
	TAILQ_INIT(&rule->rpool.list);

	PF_RULES_WLOCK();
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket));
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pool_ticket: %d != %d\n", pool_ticket,
		    V_ticket_pabuf));
		ERROUT(EBUSY);
	}

	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		error = EBUSY;

#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	if (rule->rt && !rule->direction)
		error = EINVAL;
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		error = ENOMEM;
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		error = ENOMEM;
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		error = EINVAL;
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		error = EINVAL;
	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
		if (pa->addr.type == PF_ADDR_TABLE) {
			pa->addr.p.tbl = pfr_attach_table(ruleset,
			    pa->addr.v.tblname);
			if (pa->addr.p.tbl == NULL)
				error = ENOMEM;
		}

	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_NOPFROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
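		/*
		 * pf_free_rule() drops the tag, queue, table and kif
		 * references taken above before freeing the rule itself.
		 */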
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(error);
	}

	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	counter_u64_zero(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_zero(rule->packets[i]);
		counter_u64_zero(rule->bytes[i]);
	}
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;
	PF_RULES_WUNLOCK();

	return (0);

#undef ERROUT
errout:
	PF_RULES_WUNLOCK();
errout_unlocked:
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}

static bool
pf_label_match(const struct pf_krule *rule, const char *label)
{
	int i = 0;

	while (*rule->label[i]) {
		if (strcmp(rule->label[i], label) == 0)
			return (true);
		i++;
	}

	return (false);
}

static unsigned int
pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
{
	struct pf_state *match;
	int more = 0;
	unsigned int killed = 0;

	/* Call with unlocked hashrow */

	match = pf_find_state_all(key, dir, &more);
	if (match && !more) {
		pf_unlink_state(match, 0);
		killed++;
	}

	return (killed);
}

static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	struct pf_state_key_cmp	 match_key;
	int			 idx, killed = 0;
	unsigned int		 dir;
	u_int16_t		 srcport, dstport;
	struct pfi_kkif		*kif;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    ! pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/*
			 * Create the key to find matching states, with
			 * lock held.
			 */
static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_state		*s;
	struct pf_state_key	*sk;
	struct pf_addr		*srcaddr, *dstaddr;
	struct pf_state_key_cmp	 match_key;
	int			 idx, killed = 0;
	unsigned int		 dir;
	u_int16_t		 srcport, dstport;
	struct pfi_kkif		*kif;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		sk = s->key[PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (! PF_MATCHA(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    ! pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    ! pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    ! pf_label_match(s->rule.ptr, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/* Create the key to find matching states, with lock
			 * held. */

			bzero(&match_key, sizeof(match_key));

			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = PF_SK_WIRE;
			}

			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			PF_ACPY(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			PF_ACPY(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		pf_unlink_state(s, PF_ENTER_LOCKED);
		killed++;

		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}

static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

#define	ERROUT_IOCTL(target, x)						\
    do {								\
	    error = (x);						\
	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
	    goto target;						\
    } while (0)

	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETRULENV:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETSTATESNV:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV0:
		case DIOCGIFSPEEDV1:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}
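	/*
	 * Without FWRITE only the getter ioctls below are allowed; dummy
	 * table operations are permitted but have FWRITE set so they take
	 * the write path, and DIOCGETRULE may not clear counters.
	 */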
	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCGETSTATESNV:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV1:
		case DIOCGIFSPEEDV0:
		case DIOCGETRULENV:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
	case DIOCSTART:
		sx_xlock(&pf_ioctl_lock);
		if (V_pf_status.running)
			error = EEXIST;
		else {
			int cpu;

			hook_pf();
			V_pf_status.running = 1;
			V_pf_status.since = time_second;

			CPU_FOREACH(cpu)
				V_pf_stateid[cpu] = time_second;

			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
		}
		break;

	case DIOCSTOP:
		sx_xlock(&pf_ioctl_lock);
		if (!V_pf_status.running)
			error = ENOENT;
		else {
			V_pf_status.running = 0;
			dehook_pf();
			V_pf_status.since = time_second;
			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
		}
		break;
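	/*
	 * DIOCADDRULENV consumes a packed nvlist.  As a rough illustration
	 * (not part of this file), a userland caller might package the
	 * request like this, assuming libnv and "pffd", an already-opened
	 * /dev/pf descriptor; the field names follow the checks below:
	 *
	 *	nvlist_t *nvl = nvlist_create(0);
	 *	nvlist_add_number(nvl, "ticket", ticket);
	 *	nvlist_add_number(nvl, "pool_ticket", pool_ticket);
	 *	nvlist_add_nvlist(nvl, "rule", nvrule);
	 *	struct pfioc_nv nv;
	 *	nv.data = nvlist_pack(nvl, &nv.len);
	 *	nv.size = nv.len;
	 *	ioctl(pffd, DIOCADDRULENV, &nv);
	 */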
	case DIOCADDRULENV: {
		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
		nvlist_t	*nvl = NULL;
		void		*nvlpacked = NULL;
		struct pf_krule	*rule = NULL;
		const char	*anchor = "", *anchor_call = "";
		uint32_t	 ticket = 0, pool_ticket = 0;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EINVAL);
		ticket = nvlist_get_number(nvl, "ticket");

		if (! nvlist_exists_number(nvl, "pool_ticket"))
			ERROUT(EINVAL);
		pool_ticket = nvlist_get_number(nvl, "pool_ticket");

		if (! nvlist_exists_nvlist(nvl, "rule"))
			ERROUT(EINVAL);

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO);
		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
		    rule);
		if (error)
			ERROUT(error);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		if ((error = nvlist_error(nvl)))
			ERROUT(error);

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
		    anchor_call, td);

		nvlist_destroy(nvl);
		free(nvlpacked, M_TEMP);
		break;
#undef ERROUT
DIOCADDRULENV_error:
		pf_krule_free(rule);
		nvlist_destroy(nvl);
		free(nvlpacked, M_TEMP);

		break;
	}
	case DIOCADDRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_krule		*rule;

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		error = pf_rule_to_krule(&pr->rule, rule);
		if (error != 0) {
			free(rule, M_PFRULE);
			break;
		}

		pr->anchor[sizeof(pr->anchor) - 1] = 0;

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
		    pr->anchor, pr->anchor_call, td);
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*tail;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_krulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETRULE: {
		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule;
		int			 rs_num;

		PF_RULES_WLOCK();
		pr->anchor[sizeof(pr->anchor) - 1] = 0;
		ruleset = pf_find_kruleset(pr->anchor);
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}

		pf_krule_to_rule(rule, &pr->rule);

		if (pf_kanchor_copyout(ruleset, rule, pr)) {
			PF_RULES_WUNLOCK();
			error = EBUSY;
			break;
		}
		pf_addr_copyout(&pr->rule.src.addr);
		pf_addr_copyout(&pr->rule.dst.addr);

		if (pr->action == PF_GET_CLR_CNTR) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();
		break;
	}
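	/*
	 * Like DIOCGETRULE, but the request and reply travel as packed
	 * nvlists.  Callers can pass nv->size == 0 to probe: the handler
	 * then only reports the required buffer length in nv->len.
	 */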
	case DIOCGETRULENV: {
		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
		nvlist_t		*nvrule = NULL;
		nvlist_t		*nvl = NULL;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*rule;
		void			*nvlpacked = NULL;
		int			 rs_num, nr;
		bool			 clear_counter = false;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		/* Copy the request in */
		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ruleset"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);

		if (nvlist_exists_bool(nvl, "clear_counter"))
			clear_counter = nvlist_get_bool(nvl, "clear_counter");

		if (clear_counter && !(flags & FWRITE))
			ERROUT(EACCES);

		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
		if (ruleset == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOENT);
		}

		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
		if (rs_num >= PF_RULESET_MAX) {
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->rules[rs_num].active.ticket) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		if ((error = nvlist_error(nvl))) {
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		nvrule = pf_krule_to_nvrule(rule);

		nvlist_destroy(nvl);
		nvl = nvlist_create(0);
		if (nvl == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}
		nvlist_add_number(nvl, "nr", nr);
		nvlist_add_nvlist(nvl, "rule", nvrule);
		nvlist_destroy(nvrule);
		nvrule = NULL;
		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
			PF_RULES_WUNLOCK();
			ERROUT(EBUSY);
		}

		free(nvlpacked, M_TEMP);
		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOMEM);
		}

		if (nv->size == 0) {
			PF_RULES_WUNLOCK();
			ERROUT(0);
		} else if (nv->size < nv->len) {
			PF_RULES_WUNLOCK();
			ERROUT(ENOSPC);
		}

		error = copyout(nvlpacked, nv->data, nv->len);

		if (clear_counter) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
			counter_u64_zero(rule->states_tot);
		}
		PF_RULES_WUNLOCK();

#undef ERROUT
DIOCGETRULENV_error:
		free(nvlpacked, M_TEMP);
		nvlist_destroy(nvrule);
		nvlist_destroy(nvl);

		break;
	}
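	/*
	 * DIOCCHANGERULE edits the active ruleset in place: depending on
	 * pcr->action it inserts a new rule at the head or tail, before or
	 * after an existing rule, or removes one, then renumbers the queue,
	 * bumps the ticket and recalculates the skip steps.
	 */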
	case DIOCCHANGERULE: {
		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_krule		*oldrule = NULL, *newrule = NULL;
		struct pfi_kkif		*kif = NULL;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;
		int			 rs_num;

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK);
			error = pf_rule_to_krule(&pcr->rule, newrule);
			if (error != 0) {
				free(newrule, M_PFRULE);
				break;
			}

			if (newrule->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newrule->evaluations = counter_u64_alloc(M_WAITOK);
			for (int i = 0; i < 2; i++) {
				newrule->packets[i] =
				    counter_u64_alloc(M_WAITOK);
				newrule->bytes[i] =
				    counter_u64_alloc(M_WAITOK);
			}
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->rpool.list);
		}
#define	ERROUT(x)	{ error = (x); goto DIOCCHANGERULE_error; }

		PF_RULES_WLOCK();
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_kruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kkif_attach(kif,
				    newrule->ifname);
				kif = NULL;
				pfi_kkif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (newrule->logif >= PFLOGIFS_MAX)
				error = EINVAL;
			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
				error = ENOMEM;
			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
				error = EINVAL;
			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
				if (pa->addr.type == PF_ADDR_TABLE) {
					pa->addr.p.tbl =
					    pfr_attach_table(ruleset,
					    pa->addr.v.tblname);
					if (pa->addr.p.tbl == NULL)
						error = ENOMEM;
				}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_NOPFROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				break;
			}

			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
		}
		pf_empty_kpool(&V_pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_kruleset(ruleset);

		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		pf_krule_free(newrule);
		pf_kkif_free(kif);
		break;
	}

	case DIOCCLRSTATES: {
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill	 kill;

		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = pf_clear_states(&kill);
		break;
	}

	case DIOCCLRSTATESNV: {
		error = pf_clearstates_nv((struct pfioc_nv *)addr);
		break;
	}

	case DIOCKILLSTATES: {
		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
		struct pf_kstate_kill	 kill;

		error = pf_state_kill_to_kstate_kill(psk, &kill);
		if (error)
			break;

		psk->psk_killed = 0;
		error = pf_killstates(&kill, &psk->psk_killed);
		break;
	}

	case DIOCKILLSTATESNV: {
		error = pf_killstates_nv((struct pfioc_nv *)addr);
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pfsync_state	*sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state	*ps = (struct pfioc_state *)addr;
		struct pf_state		*s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pfsync_state_export(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}

	case DIOCGETSTATENV: {
		error = pf_getstate((struct pfioc_nv *)addr);
		break;
	}

	case DIOCGETSTATES: {
		struct pfioc_states	*ps = (struct pfioc_states *)addr;
		struct pf_state		*s;
		struct pfsync_state	*pstore, *p;
		int			 i, nr;

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state) * nr;
			break;
		}

		p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				if ((nr+1) * sizeof(*p) > ps->ps_len) {
					PF_HASHROW_UNLOCK(ih);
					goto DIOCGETSTATES_full;
				}
				pfsync_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
		}
DIOCGETSTATES_full:
		error = copyout(pstore, ps->ps_states,
		    sizeof(struct pfsync_state) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		ps->ps_len = sizeof(struct pfsync_state) * nr;
		free(pstore, M_TEMP);

		break;
	}

	case DIOCGETSTATESNV: {
		error = pf_getstates((struct pfioc_nv *)addr);
		break;
	}

	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;

		PF_RULES_RLOCK();
		s->running = V_pf_status.running;
		s->since = V_pf_status.since;
		s->debug = V_pf_status.debug;
		s->hostid = V_pf_status.hostid;
		s->states = V_pf_status.states;
		s->src_nodes = V_pf_status.src_nodes;

		for (int i = 0; i < PFRES_MAX; i++)
			s->counters[i] =
			    counter_u64_fetch(V_pf_status.counters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			s->lcounters[i] =
			    counter_u64_fetch(V_pf_status.lcounters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			s->fcounters[i] =
			    counter_u64_fetch(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			s->scounters[i] =
			    counter_u64_fetch(V_pf_status.scounters[i]);

		bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ);
		bcopy(V_pf_status.pf_chksum, s->pf_chksum,
		    PF_MD5_DIGEST_LENGTH);

		pfi_update_status(s->ifname, s);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if	*pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		strlcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		PF_RULES_WLOCK();
		for (int i = 0; i < PFRES_MAX; i++)
			counter_u64_zero(V_pf_status.counters[i]);
		for (int i = 0; i < FCNT_MAX; i++)
			counter_u64_zero(V_pf_status.fcounters[i]);
		for (int i = 0; i < SCNT_MAX; i++)
			counter_u64_zero(V_pf_status.scounters[i]);
		for (int i = 0; i < LCNT_MAX; i++)
			counter_u64_zero(V_pf_status.lcounters[i]);
		V_pf_status.since = time_second;
		if (*V_pf_status.ifname)
			pfi_update_status(V_pf_status.ifname, NULL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key	*sk;
		struct pf_state		*state;
		struct pf_state_key_cmp	 key;
		int			 m = 0, direction = pnl->direction;
		int			 sidx, didx;

		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
		sidx = (direction == PF_IN) ? 1 : 0;
		didx = (direction == PF_IN) ? 0 : 1;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dport || !pnl->sport)))
			error = EINVAL;
		else {
			bzero(&key, sizeof(key));
			key.af = pnl->af;
			key.proto = pnl->proto;
			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
			key.port[sidx] = pnl->sport;
			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
			key.port[didx] = pnl->dport;

			state = pf_find_state_all(&key, direction, &m);

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* XXXGL: not locked read */
				sk = state->key[sidx];
				PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
				pnl->rsport = sk->port[sidx];
				PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
				pnl->rdport = sk->port[didx];
			} else
				error = ENOENT;
		}
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
		int		 old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		PF_RULES_WLOCK();
		old = V_pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread);
		pt->seconds = old;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		PF_RULES_RLOCK();
		pl->limit = V_pf_limits[pl->index].limit;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
		int			 old_limit;

		PF_RULES_WLOCK();
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    V_pf_limits[pl->index].zone == NULL) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
		old_limit = V_pf_limits[pl->index].limit;
		V_pf_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t	*level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_kruleset	*ruleset = &pf_main_ruleset;
		struct pf_krule		*rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGIFSPEEDV0:
	case DIOCGIFSPEEDV1: {
		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1	 ps;
		struct ifnet		*ifp;

		if (psp->ifname[0] != 0) {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				psp->baudrate32 =
				    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
				if (cmd == DIOCGIFSPEEDV1)
					psp->baudrate = ifp->if_baudrate;
			} else
				error = EINVAL;
		} else
			error = EINVAL;
		break;
	}

#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq		*altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq, *a;
		struct ifnet		*ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error) {
			/* Don't leak the freshly allocated altq on a
			 * failed import. */
			free(altq, M_PFALTQ);
			break;
		}
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			break;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			break;
		}

		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}
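	/*
	 * The V0/V1 variants of the ALTQ ioctls share one handler each:
	 * pf_import_kaltq() and pf_export_kaltq() translate between the
	 * versioned userland structs and struct pf_altq, keying the layout
	 * off IOCPARM_LEN(cmd).
	 */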
	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq		*altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq		*altq;
		int			 nbytes;
		u_int32_t		 version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			break;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;

		PF_RULES_WLOCK();
		pf_empty_kpool(&V_pf_pabuf);
		pp->ticket = ++V_ticket_pabuf;
		PF_RULES_WUNLOCK();
		break;
	}
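	/*
	 * Pool addresses are staged in V_pf_pabuf under the ticket issued
	 * by DIOCBEGINADDRS above; DIOCADDADDR appends to that buffer, and
	 * a later rule-add/change moves the staged list into the rule's
	 * rpool via pf_mv_kpool().
	 */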
	case DIOCADDADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpooladdr	*pa;
		struct pfi_kkif		*kif = NULL;

#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pp->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}
		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
		pf_pooladdr_to_kpooladdr(&pp->addr, pa);
		if (pa->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		PF_RULES_WLOCK();
		if (pp->ticket != V_ticket_pabuf) {
			PF_RULES_WUNLOCK();
			if (pa->ifname[0])
				pf_kkif_free(kif);
			free(pa, M_PFRULE);
			error = EBUSY;
			break;
		}
		if (pa->ifname[0]) {
			pa->kif = pfi_kkif_attach(kif, pa->ifname);
			kif = NULL;
			pfi_kkif_ref(pa->kif);
		} else
			pa->kif = NULL;
		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
			if (pa->ifname[0])
				pfi_kkif_unref(pa->kif);
			PF_RULES_WUNLOCK();
			free(pa, M_PFRULE);
			break;
		}
		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;

		PF_RULES_RLOCK();
		pp->nr = 0;
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*pa;
		u_int32_t		 nr = 0;

		PF_RULES_RLOCK();
		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			break;
		}
		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
		pf_addr_copyout(&pp->addr.addr);
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool		*pool;
		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_kruleset	*ruleset;
		struct pfi_kkif		*kif = NULL;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			break;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;

		PF_RULES_RLOCK();
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_kruleset	*ruleset;
		struct pf_kanchor	*anchor;
		u_int32_t		 nr = 0;

		PF_RULES_RLOCK();
		pr->path[sizeof(pr->path) - 1] = 0;
		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
			PF_RULES_RUNLOCK();
			error = ENOENT;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_kanchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof(pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}
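	/*
	 * The remaining DIOCR* table ioctls follow a common pattern:
	 * validate pfrio_esize against the expected element size, bound the
	 * element count by pf_ioctl_maxcount and a WOULD_OVERFLOW() check
	 * before sizing the copyin buffer with mallocarray(), call into
	 * pfr_*() under the rules lock, and copy results back out (only
	 * when PFR_FLAG_FEEDBACK is set, for the ops that support it).
	 */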
	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_WUNLOCK();
			error = EINVAL;
			break;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_WUNLOCK();
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_TEMP);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			break;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_NOWAIT);
		if (pfrts == NULL) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}

		PF_RULES_WLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			break;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_TEMP, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_TEMP);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			break;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
		    M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_TEMP, M_NOWAIT);
		if (! pfrastats) {
			error = ENOMEM;
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_TEMP);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_TEMP);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			break;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_TEMP, M_NOWAIT);
		if (! pfras) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_TEMP);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}
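	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement a two-phase
	 * commit over an array of pfioc_trans_e elements: begin opens an
	 * inactive ruleset (or ALTQ/table staging area) per element and
	 * hands back tickets; commit first re-validates every ticket and
	 * only then swaps each inactive ruleset in, so a stale element
	 * fails the whole transaction with EBUSY before anything changes.
	 */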
	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioes, *ioe;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_NOWAIT);
		if (! ioes) {
			error = ENOMEM;
			break;
		}
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_TEMP);
			break;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe, *ioes;
		size_t			 totlen;
		int			 i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			break;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			break;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_TEMP, M_NOWAIT);
ioes) { 4256 error = ENOMEM; 4257 break; 4258 } 4259 error = copyin(io->array, ioes, totlen); 4260 if (error) { 4261 free(ioes, M_TEMP); 4262 break; 4263 } 4264 PF_RULES_WLOCK(); 4265 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4266 switch (ioe->rs_num) { 4267 #ifdef ALTQ 4268 case PF_RULESET_ALTQ: 4269 if (ioe->anchor[0]) { 4270 PF_RULES_WUNLOCK(); 4271 free(ioes, M_TEMP); 4272 error = EINVAL; 4273 goto fail; 4274 } 4275 if ((error = pf_rollback_altq(ioe->ticket))) { 4276 PF_RULES_WUNLOCK(); 4277 free(ioes, M_TEMP); 4278 goto fail; /* really bad */ 4279 } 4280 break; 4281 #endif /* ALTQ */ 4282 case PF_RULESET_TABLE: 4283 { 4284 struct pfr_table table; 4285 4286 bzero(&table, sizeof(table)); 4287 strlcpy(table.pfrt_anchor, ioe->anchor, 4288 sizeof(table.pfrt_anchor)); 4289 if ((error = pfr_ina_rollback(&table, 4290 ioe->ticket, NULL, 0))) { 4291 PF_RULES_WUNLOCK(); 4292 free(ioes, M_TEMP); 4293 goto fail; /* really bad */ 4294 } 4295 break; 4296 } 4297 default: 4298 if ((error = pf_rollback_rules(ioe->ticket, 4299 ioe->rs_num, ioe->anchor))) { 4300 PF_RULES_WUNLOCK(); 4301 free(ioes, M_TEMP); 4302 goto fail; /* really bad */ 4303 } 4304 break; 4305 } 4306 } 4307 PF_RULES_WUNLOCK(); 4308 free(ioes, M_TEMP); 4309 break; 4310 } 4311 4312 case DIOCXCOMMIT: { 4313 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4314 struct pfioc_trans_e *ioe, *ioes; 4315 struct pf_kruleset *rs; 4316 size_t totlen; 4317 int i; 4318 4319 if (io->esize != sizeof(*ioe)) { 4320 error = ENODEV; 4321 break; 4322 } 4323 4324 if (io->size < 0 || 4325 io->size > pf_ioctl_maxcount || 4326 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4327 error = EINVAL; 4328 break; 4329 } 4330 4331 totlen = sizeof(struct pfioc_trans_e) * io->size; 4332 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 4333 M_TEMP, M_NOWAIT); 4334 if (ioes == NULL) { 4335 error = ENOMEM; 4336 break; 4337 } 4338 error = copyin(io->array, ioes, totlen); 4339 if (error) { 4340 free(ioes, M_TEMP); 4341 break; 4342 } 4343 PF_RULES_WLOCK(); 4344 /* First makes sure everything will succeed. */ 4345 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4346 switch (ioe->rs_num) { 4347 #ifdef ALTQ 4348 case PF_RULESET_ALTQ: 4349 if (ioe->anchor[0]) { 4350 PF_RULES_WUNLOCK(); 4351 free(ioes, M_TEMP); 4352 error = EINVAL; 4353 goto fail; 4354 } 4355 if (!V_altqs_inactive_open || ioe->ticket != 4356 V_ticket_altqs_inactive) { 4357 PF_RULES_WUNLOCK(); 4358 free(ioes, M_TEMP); 4359 error = EBUSY; 4360 goto fail; 4361 } 4362 break; 4363 #endif /* ALTQ */ 4364 case PF_RULESET_TABLE: 4365 rs = pf_find_kruleset(ioe->anchor); 4366 if (rs == NULL || !rs->topen || ioe->ticket != 4367 rs->tticket) { 4368 PF_RULES_WUNLOCK(); 4369 free(ioes, M_TEMP); 4370 error = EBUSY; 4371 goto fail; 4372 } 4373 break; 4374 default: 4375 if (ioe->rs_num < 0 || ioe->rs_num >= 4376 PF_RULESET_MAX) { 4377 PF_RULES_WUNLOCK(); 4378 free(ioes, M_TEMP); 4379 error = EINVAL; 4380 goto fail; 4381 } 4382 rs = pf_find_kruleset(ioe->anchor); 4383 if (rs == NULL || 4384 !rs->rules[ioe->rs_num].inactive.open || 4385 rs->rules[ioe->rs_num].inactive.ticket != 4386 ioe->ticket) { 4387 PF_RULES_WUNLOCK(); 4388 free(ioes, M_TEMP); 4389 error = EBUSY; 4390 goto fail; 4391 } 4392 break; 4393 } 4394 } 4395 /* Now do the commit - no errors should happen here. 
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			switch (ioe->rs_num) {
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_commit(&table,
				    ioe->ticket, NULL, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			    }
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		free(ioes, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_srchash *sh;
		struct pf_ksrc_node *n;
		struct pf_src_node *p, *pstore;
		uint32_t i, nr = 0;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry)
				nr++;
			PF_HASHROW_UNLOCK(sh);
		}

		psn->psn_len = min(psn->psn_len,
		    sizeof(struct pf_src_node) * nr);

		if (psn->psn_len == 0) {
			psn->psn_len = sizeof(struct pf_src_node) * nr;
			break;
		}

		nr = 0;

		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				if ((nr + 1) * sizeof(*p) >
				    (unsigned)psn->psn_len)
					break;

				pf_src_node_copy(n, p);

				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(sh);
		}
		error = copyout(pstore, psn->psn_src_nodes,
		    sizeof(struct pf_src_node) * nr);
		if (error) {
			free(pstore, M_TEMP);
			break;
		}
		psn->psn_len = sizeof(struct pf_src_node) * nr;
		free(pstore, M_TEMP);
		break;
	}

	case DIOCCLRSRCNODES: {
		pf_clear_srcnodes(NULL);
		pf_purge_expired_src_nodes();
		break;
	}

	case DIOCKILLSRCNODES:
		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
		break;

	case DIOCKEEPCOUNTERS:
		error = pf_keepcounters((struct pfioc_nv *)addr);
		break;

	case DIOCSETHOSTID: {
		u_int32_t *hostid = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		if (*hostid == 0)
			V_pf_status.hostid = arc4random();
		else
			V_pf_status.hostid = *hostid;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPFLUSH:
		PF_RULES_WLOCK();
		pf_osfp_flush();
		PF_RULES_WUNLOCK();
		break;

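	/*
	 * Interface ioctls: DIOCIGETIFACES exports a snapshot of the
	 * known pf interfaces; DIOCSETIFFLAG and DIOCCLRIFFLAG toggle
	 * per-interface user flags (e.g. the "skip" flag).
	 */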
	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		struct pfi_kif *ifstore;
		size_t bufsiz;

		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
			error = ENODEV;
			break;
		}

		if (io->pfiio_size < 0 ||
		    io->pfiio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
			error = EINVAL;
			break;
		}

		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
		    M_TEMP, M_NOWAIT);
		if (ifstore == NULL) {
			error = ENOMEM;
			break;
		}

		PF_RULES_RLOCK();
		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
		PF_RULES_RUNLOCK();
		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
		free(ifstore, M_TEMP);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		PF_RULES_WLOCK();
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		PF_RULES_WUNLOCK();
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	if (sx_xlocked(&pf_ioctl_lock))
		sx_xunlock(&pf_ioctl_lock);
	CURVNET_RESTORE();

#undef ERROUT_IOCTL

	return (error);
}

void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(counter_u64_fetch(st->packets[0]),
	    sp->packets[0]);
	pf_state_counter_hton(counter_u64_fetch(st->packets[1]),
	    sp->packets[1]);
	pf_state_counter_hton(counter_u64_fetch(st->bytes[0]), sp->bytes[0]);
	pf_state_counter_hton(counter_u64_fetch(st->bytes[1]), sp->bytes[1]);
}

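/*
 * Report the address count of a table for userland.  If the table is
 * not active but has an active root, the root's counters are used;
 * -1 indicates that no active table was found.
 */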
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__,
	    aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

/*
 * XXX - Check for version mismatch!!!
 */
static void
pf_clear_all_states(void)
{
	struct pf_state *s;
	u_int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

static void
pf_clear_srcnodes(struct pf_ksrc_node *n)
{
	struct pf_state *s;
	int i;

	for (i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (n == NULL || n == s->src_node)
				s->src_node = NULL;
			if (n == NULL || n == s->nat_src_node)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (n == NULL) {
		struct pf_srchash *sh;

		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
		    i++, sh++) {
			PF_HASHROW_LOCK(sh);
			LIST_FOREACH(n, &sh->nodes, entry) {
				n->expire = 1;
				n->states = 0;
			}
			PF_HASHROW_UNLOCK(sh);
		}
	} else {
		/* XXX: hash slot should already be locked here. */
		n->expire = 1;
		n->states = 0;
	}
}

static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;

	LIST_INIT(&kill);
	for (int i = 0; i <= pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->src_node && s->src_node->expire == 1)
				s->src_node = NULL;
			if (s->nat_src_node && s->nat_src_node->expire == 1)
				s->nat_src_node = NULL;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	psnk->psnk_killed = pf_free_src_nodes(&kill);
}

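/*
 * DIOCKEEPCOUNTERS carries a packed nvlist holding a single boolean,
 * "keep_counters".  A hypothetical userland caller might prepare the
 * request roughly as follows (illustrative sketch only, error handling
 * omitted):
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */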
static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (!nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_TEMP);
	return (error);
}

static unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp match_key;
	struct pf_state *s;
	struct pfi_kkif *kif;
	int idx;
	unsigned int killed = 0, dir;

	for (unsigned int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname, kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				PF_ACPY(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				PF_ACPY(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_unlink_state(s, PF_ENTER_LOCKED);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid,
		    kill->psk_ifname);

	return (killed);
}

static int
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_state *s;

	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_unlink_state(s, PF_ENTER_LOCKED);
			*killed = 1;
		}
		return (0);
	}

	for (unsigned int i = 0; i <= pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);

	return (0);
}

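/*
 * pf_killstates_nv() and pf_clearstates_nv() share one pattern: copy
 * in and unpack the caller's nvlist, translate it into a struct
 * pf_kstate_kill, act on it, then reuse the nvlist to return the
 * number of killed states.
 */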
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	error = pf_killstates(&kill, &killed);

	free(nvlpacked, M_TEMP);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_TEMP);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	killed = pf_clear_states(&kill);

	free(nvlpacked, M_TEMP);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_TEMP);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_state *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_TEMP);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_TEMP);
	nvlist_destroy(nvl);
	return (error);
}

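/*
 * Export all states as an nvlist array.  The "count" hint is seeded
 * from the state zone up front and corrected after the walk; when the
 * caller's buffer would overflow, a partial array is returned together
 * with the uncorrected count.
 */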
static int
pf_getstates(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_state *s = NULL;
	int error = 0;
	uint64_t count = 0;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "count", uma_zone_get_cur(V_pf_state_z));

	for (int i = 0; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (s->timeout == PFTM_UNLINKED)
				continue;

			nvls = pf_state_to_nvstate(s);
			if (nvls == NULL) {
				PF_HASHROW_UNLOCK(ih);
				ERROUT(ENOMEM);
			}
			if ((nvlist_size(nvl) + nvlist_size(nvls)) >
			    nv->size) {
				/* We've run out of room for more states. */
				nvlist_destroy(nvls);
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATESNV_full;
			}
			nvlist_append_nvlist_array(nvl, "states", nvls);
			count++;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/*
	 * We've managed to put them all in the available space.  Make sure
	 * 'count' matches our array.  (That's racy, because we don't hold
	 * a lock over all states, only over each row individually.)
	 */
	(void)nvlist_take_number(nvl, "count");
	nvlist_add_number(nvl, "count", count);

DIOCGETSTATESNV_full:

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	free(nvlpacked, M_TEMP);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';

	do {
		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
			break;		/* XXX: rollback? */
		}

		/* XXX: these should always succeed here */
		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);

		if ((error = pf_clear_tables()) != 0)
			break;

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_clear_srcnodes(NULL);

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

	return (error);
}

static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	chk = pf_test(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	/*
	 * For loopback traffic IPv6 uses the real interface in order to
	 * support scoped addresses.  To support stateful filtering we have
	 * to change this to lo0, as is the case with IPv4.
	 */
	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_SET(ifp->if_vnet);
	chk = pf_test6(PF_OUT, flags, ifp, m, inp);
	CURVNET_RESTORE();

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

static void
hook_pf(void)
{
	struct pfil_hook_args pha;
	struct pfil_link_args pla;
	int ret;

	if (V_pf_pfil_hooked)
		return;

	pha.pa_version = PFIL_VERSION;
	pha.pa_modname = "pf";
	pha.pa_ruleset = NULL;

	pla.pa_version = PFIL_VERSION;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_func = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_func = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_func = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
#endif

	V_pf_pfil_hooked = 1;
}

static void
dehook_pf(void)
{

	if (V_pf_pfil_hooked == 0)
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	V_pf_pfil_hooked = 0;
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

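/*
 * Global (non-VNET) initialization: the locks, /dev/pf and the purge
 * thread are created once at module load; per-vnet state is set up in
 * pf_load_vnet() above.
 */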
static int
pf_load(void)
{
	int error;

	rm_init(&pf_rules_lock, "pf rulesets");
	sx_init(&pf_ioctl_lock, "pf ioctl");
	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0,
	    "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

static void
pf_unload_vnet(void)
{
	int ret;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();

	PF_RULES_WLOCK();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

	/* Free counters last as we updated them during shutdown. */
	counter_u64_free(V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(V_pf_default_rule.packets[i]);
		counter_u64_free(V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	counter_u64_free(V_pf_default_rule.src_nodes);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < LCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		counter_u64_free(V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
}

static void
pf_unload(void)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	rm_destroy(&pf_rules_lock);
	sx_destroy(&pf_ioctl_lock);
	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		break;
	case MOD_UNLOAD:
		/*
		 * Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s.
		 */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

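/*
 * Module glue: pf registers at SI_SUB_PROTO_FIREWALL so that it comes
 * up after the network stack; the VNET_SYSINIT/SYSUNINIT pairs above
 * handle the per-vnet halves of load and unload.
 */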
static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_VERSION(pf, PF_MODVER);