/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2003 Cedric Berger
 * Copyright (c) 2005 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2005 Ryan McBride <mcbride@openbsd.org>
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_if.c,v 1.54 2008/06/14 16:55:28 mk Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/route.h>

VNET_DEFINE(struct pfi_kkif *, pfi_all);
VNET_DEFINE_STATIC(long, pfi_update);
#define	V_pfi_update	VNET(pfi_update)
#define	PFI_BUFFER_MAX	0x10000

VNET_DECLARE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

VNET_DEFINE_STATIC(struct pfr_addr *, pfi_buffer);
VNET_DEFINE_STATIC(int, pfi_buffer_cnt);
VNET_DEFINE_STATIC(int, pfi_buffer_max);
#define	V_pfi_buffer		VNET(pfi_buffer)
#define	V_pfi_buffer_cnt	VNET(pfi_buffer_cnt)
#define	V_pfi_buffer_max	VNET(pfi_buffer_max)

#ifdef PF_WANT_32_TO_64_COUNTER
VNET_DEFINE(struct allkiflist_head, pf_allkiflist);
VNET_DEFINE(size_t, pf_allkifcount);
VNET_DEFINE(struct pfi_kkif *, pf_kifmarker);
#endif

eventhandler_tag	 pfi_attach_cookie;
eventhandler_tag	 pfi_detach_cookie;
eventhandler_tag	 pfi_attach_group_cookie;
eventhandler_tag	 pfi_change_group_cookie;
eventhandler_tag	 pfi_detach_group_cookie;
eventhandler_tag	 pfi_ifaddr_event_cookie;

static void	 pfi_attach_ifnet(struct ifnet *, struct pfi_kkif *);
static void	 pfi_attach_ifgroup(struct ifg_group *, struct pfi_kkif *);

static void	 pfi_kkif_update(struct pfi_kkif *);
static void	 pfi_dynaddr_update(struct pfi_dynaddr *dyn);
static void	 pfi_table_update(struct pfr_ktable *, struct pfi_kkif *, int,
		    int);
static void	 pfi_instance_add(struct ifnet *, int, int);
static void	 pfi_address_add(struct sockaddr *, int, int);
static int	 pfi_kkif_compare(struct pfi_kkif *, struct pfi_kkif *);
static int	 pfi_skip_if(const char *, struct pfi_kkif *);
static int	 pfi_unmask(void *);
static void	 pfi_attach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_detach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_attach_group_event(void * __unused, struct ifg_group *);
static void	 pfi_change_group_event(void * __unused, char *);
static void	 pfi_detach_group_event(void * __unused, struct ifg_group *);
static void	 pfi_ifaddr_event(void * __unused, struct ifnet *);

RB_HEAD(pfi_ifhead, pfi_kkif);
static RB_PROTOTYPE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
static RB_GENERATE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
VNET_DEFINE_STATIC(struct pfi_ifhead, pfi_ifs);
#define	V_pfi_ifs	VNET(pfi_ifs)

MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");

LIST_HEAD(pfi_list, pfi_kkif);
VNET_DEFINE_STATIC(struct pfi_list, pfi_unlinked_kifs);
#define	V_pfi_unlinked_kifs	VNET(pfi_unlinked_kifs)
static struct mtx pfi_unlnkdkifs_mtx;
MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
    MTX_DEF);
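
/*
 * Per-vnet initialization: allocate the dynamic address buffer, pre-allocate
 * one kif for the "all" group, one per existing interface group and one per
 * existing ifnet, then attach them all in a single pass under the rules
 * write lock.
 */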
void
pfi_initialize_vnet(void)
{
	struct pfi_list kifs = LIST_HEAD_INITIALIZER();
	struct epoch_tracker et;
	struct pfi_kkif *kif;
	struct ifg_group *ifg;
	struct ifnet *ifp;
	int nkifs;

	V_pfi_buffer_max = 64;
	V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	nkifs = 1;	/* one for V_pfi_all */
	IFNET_RLOCK();
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		nkifs++;
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		nkifs++;

	for (int n = 0; n < nkifs; n++) {
		kif = pf_kkif_create(M_WAITOK);
		LIST_INSERT_HEAD(&kifs, kif, pfik_list);
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	kif = LIST_FIRST(&kifs);
	LIST_REMOVE(kif, pfik_list);
	V_pfi_all = pfi_kkif_attach(kif, IFG_ALL);
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifgroup(ifg, kif);
	}
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifnet(ifp, kif);
	}
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();

	MPASS(LIST_EMPTY(&kifs));
}

void
pfi_initialize(void)
{

	pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
	    pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
	    pfi_attach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
	    pfi_change_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
	    pfi_detach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
	    pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
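
/*
 * Per-vnet teardown: unlink every kif from its ifnet or group, drain the
 * list of unlinked kifs and release the dynamic address buffer.  Runs with
 * the rules write lock held.
 */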
void
pfi_cleanup_vnet(void)
{
	struct pfi_kkif *kif;

	PF_RULES_WASSERT();

	V_pfi_all = NULL;
	while ((kif = RB_MIN(pfi_ifhead, &V_pfi_ifs))) {
		RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);
		if (kif->pfik_group)
			kif->pfik_group->ifg_pf_kif = NULL;
		if (kif->pfik_ifp) {
			if_rele(kif->pfik_ifp);
			kif->pfik_ifp->if_pf_kif = NULL;
		}
		pf_kkif_free(kif);
	}

	mtx_lock(&pfi_unlnkdkifs_mtx);
	while ((kif = LIST_FIRST(&V_pfi_unlinked_kifs))) {
		LIST_REMOVE(kif, pfik_list);
		pf_kkif_free(kif);
	}
	mtx_unlock(&pfi_unlnkdkifs_mtx);

	free(V_pfi_buffer, PFI_MTYPE);
}

void
pfi_cleanup(void)
{

	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
	EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
	EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
	EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
	EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
}
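
/*
 * Allocate a kernel interface (kif) structure together with its packet and
 * byte counters.  The flags argument is passed to malloc(9), so M_NOWAIT
 * callers must be prepared for a NULL return.
 */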
struct pfi_kkif*
pf_kkif_create(int flags)
{
	struct pfi_kkif *kif;
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	kif = malloc(sizeof(*kif), PFI_MTYPE, flags | M_ZERO);
	if (! kif)
		return (kif);

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				if (pf_counter_u64_init(&kif->pfik_packets[i][j][k], flags) != 0) {
					pf_kkif_free(kif);
					return (NULL);
				}

				if (pf_counter_u64_init(&kif->pfik_bytes[i][j][k], flags) != 0) {
					pf_kkif_free(kif);
					return (NULL);
				}
			}
		}
	}

#ifdef PF_WANT_32_TO_64_COUNTER
	wowned = PF_RULES_WOWNED();
	if (!wowned)
		PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, kif, pfik_allkiflist);
	V_pf_allkifcount++;
	if (!wowned)
		PF_RULES_WUNLOCK();
#endif

	return (kif);
}

void
pf_kkif_free(struct pfi_kkif *kif)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (! kif)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	wowned = PF_RULES_WOWNED();
	if (!wowned)
		PF_RULES_WLOCK();
	LIST_REMOVE(kif, pfik_allkiflist);
	V_pf_allkifcount--;
	if (!wowned)
		PF_RULES_WUNLOCK();
#endif

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				pf_counter_u64_deinit(&kif->pfik_packets[i][j][k]);
				pf_counter_u64_deinit(&kif->pfik_bytes[i][j][k]);
			}
		}
	}

	free(kif, PFI_MTYPE);
}

void
pf_kkif_zero(struct pfi_kkif *kif)
{

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				pf_counter_u64_zero(&kif->pfik_packets[i][j][k]);
				pf_counter_u64_zero(&kif->pfik_bytes[i][j][k]);
			}
		}
	}
	kif->pfik_tzero = time_second;
}

struct pfi_kkif *
pfi_kkif_find(const char *kif_name)
{
	struct pfi_kif_cmp s;

	PF_RULES_ASSERT();

	memset(&s, 0, sizeof(s));
	strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));

	return (RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&s));
}
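
/*
 * Attach a pre-allocated kif under the given name.  If a kif with that name
 * already exists, the pre-allocated one is freed and the existing entry is
 * returned instead, so callers must use the return value rather than the
 * kif they passed in.
 */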
struct pfi_kkif *
pfi_kkif_attach(struct pfi_kkif *kif, const char *kif_name)
{
	struct pfi_kkif *kif1;

	PF_RULES_WASSERT();
	KASSERT(kif != NULL, ("%s: null kif", __func__));

	kif1 = pfi_kkif_find(kif_name);
	if (kif1 != NULL) {
		pf_kkif_free(kif);
		return (kif1);
	}

	pf_kkif_zero(kif);
	strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
	/*
	 * It seems that the value of time_second is in an uninitialized
	 * state when pf sets the interface statistics clear time during
	 * boot if pf was statically linked into the kernel.  Instead of
	 * setting a bogus time value, have pfi_get_ifaces() handle this
	 * case: it uses time_second if it sees that the time is 0.
	 */
	kif->pfik_tzero = time_second > 1 ? time_second : 0;
	TAILQ_INIT(&kif->pfik_dynaddrs);

	RB_INSERT(pfi_ifhead, &V_pfi_ifs, kif);

	return (kif);
}

void
pfi_kkif_ref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	kif->pfik_rulerefs++;
}

static void
pfi_kkif_remove_if_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	if (kif->pfik_rulerefs > 0)
		return;

	/*
	 * A kif that still references an ifnet or a group, is the "all"
	 * kif, or carries flags must remain in the tree.
	 */
	if (kif->pfik_ifp != NULL || kif->pfik_group != NULL ||
	    kif == V_pfi_all || kif->pfik_flags != 0)
		return;

	RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);

	kif->pfik_flags |= PFI_IFLAG_REFS;

	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_INSERT_HEAD(&V_pfi_unlinked_kifs, kif, pfik_list);
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}

void
pfi_kkif_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	KASSERT(kif->pfik_rulerefs > 0, ("%s: %p has zero refs", __func__, kif));

	kif->pfik_rulerefs--;

	pfi_kkif_remove_if_unref(kif);
}

void
pfi_kkif_purge(void)
{
	struct pfi_kkif *kif, *kif1;

	/*
	 * Do naive mark-and-sweep garbage collecting of old kifs.
	 * Reference flag is raised by pf_purge_expired_states().
	 */
	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_FOREACH_SAFE(kif, &V_pfi_unlinked_kifs, pfik_list, kif1) {
		if (!(kif->pfik_flags & PFI_IFLAG_REFS)) {
			LIST_REMOVE(kif, pfik_list);
			pf_kkif_free(kif);
		} else
			kif->pfik_flags &= ~PFI_IFLAG_REFS;
	}
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}

int
pfi_kkif_match(struct pfi_kkif *rule_kif, struct pfi_kkif *packet_kif)
{
	struct ifg_list *p;

	NET_EPOCH_ASSERT();

	if (rule_kif == NULL || rule_kif == packet_kif)
		return (1);

	if (rule_kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
			if (p->ifgl_group == rule_kif->pfik_group)
				return (1);
	}

	return (0);
}

static void
pfi_attach_ifnet(struct ifnet *ifp, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;
	kif = pfi_kkif_attach(kif, ifp->if_xname);
	if_ref(ifp);
	kif->pfik_ifp = ifp;
	ifp->if_pf_kif = kif;
	pfi_kkif_update(kif);
}

static void
pfi_attach_ifgroup(struct ifg_group *ifg, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;
	kif = pfi_kkif_attach(kif, ifg->ifg_group);
	kif->pfik_group = ifg;
	ifg->ifg_pf_kif = kif;
}

int
pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		switch (dyn->pfid_acnt4) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr4,
			    &dyn->pfid_mask4, a, AF_INET));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		switch (dyn->pfid_acnt6) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr6,
			    &dyn->pfid_mask6, a, AF_INET6));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
		}
		break;
#endif /* INET6 */
	default:
		return (0);
	}
}
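
/*
 * Set up a dynamic interface address ("(ifname)" in a rule): reference a kif
 * for the interface and attach a table in the reserved anchor whose name
 * encodes the interface and its modifiers (:network, :broadcast, :peer, :0,
 * /prefix).  The table is refreshed from pfi_kkif_update() as interface
 * addresses change.
 */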
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct epoch_tracker et;
	struct pfi_dynaddr *dyn;
	char tblname[PF_TABLE_NAME_SIZE];
	struct pf_kruleset *ruleset = NULL;
	struct pfi_kkif *kif;
	int rv = 0;

	PF_RULES_WASSERT();
	KASSERT(aw->type == PF_ADDR_DYNIFTL, ("%s: type %u",
	    __func__, aw->type));
	KASSERT(aw->p.dyn == NULL, ("%s: dyn is %p", __func__, aw->p.dyn));

	if ((dyn = malloc(sizeof(*dyn), PFI_MTYPE, M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	if ((kif = pf_kkif_create(M_NOWAIT)) == NULL) {
		free(dyn, PFI_MTYPE);
		return (ENOMEM);
	}

	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kkif_attach(kif, IFG_ALL);
	else
		dyn->pfid_kif = pfi_kkif_attach(kif, aw->v.ifname);
	kif = NULL;
	pfi_kkif_ref(dyn->pfid_kif);

	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_kruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	NET_EPOCH_ENTER(et);
	pfi_kkif_update(dyn->pfid_kif);
	NET_EPOCH_EXIT(et);

	return (0);

_bad:
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_kruleset(ruleset);
	pfi_kkif_unref(dyn->pfid_kif);
	free(dyn, PFI_MTYPE);

	return (rv);
}

static void
pfi_kkif_update(struct pfi_kkif *kif)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;
	struct pfi_dynaddr *p;
	struct pfi_kkif *tmpkif;

	NET_EPOCH_ASSERT();
	PF_RULES_WASSERT();

	/* update all dynaddr */
	TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
		pfi_dynaddr_update(p);

	/* Apply group flags to new members. */
	if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members,
		    ifgm_next) {
			tmpkif = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;
			if (tmpkif == NULL)
				continue;

			tmpkif->pfik_flags |= kif->pfik_flags;
		}
	}

	/* again for all groups kif is member of */
	if (kif->pfik_ifp != NULL) {
		CK_STAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
			pfi_kkif_update((struct pfi_kkif *)
			    ifgl->ifgl_group->ifg_pf_kif);
	}
}

static void
pfi_dynaddr_update(struct pfi_dynaddr *dyn)
{
	struct pfi_kkif *kif;
	struct pfr_ktable *kt;

	PF_RULES_WASSERT();
	KASSERT(dyn && dyn->pfid_kif && dyn->pfid_kt,
	    ("%s: bad argument", __func__));

	kif = dyn->pfid_kif;
	kt = dyn->pfid_kt;

	if (kt->pfrkt_larg != V_pfi_update) {
		/* this table needs to be brought up-to-date */
		pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
		kt->pfrkt_larg = V_pfi_update;
	}
	pfr_dynaddr_update(kt, dyn);
}
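
/*
 * Rebuild the table backing a dynamic address: gather the current addresses
 * of the interface (or of all members of the group) into V_pfi_buffer and
 * install them with a single pfr_set_addrs() call.
 */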
static void
pfi_table_update(struct pfr_ktable *kt, struct pfi_kkif *kif, int net, int flags)
{
	int e, size2 = 0;
	struct ifg_member *ifgm;

	NET_EPOCH_ASSERT();

	V_pfi_buffer_cnt = 0;

	if (kif->pfik_ifp != NULL)
		pfi_instance_add(kif->pfik_ifp, net, flags);
	else if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
			pfi_instance_add(ifgm->ifgm_ifp, net, flags);
	}

	if ((e = pfr_set_addrs(&kt->pfrkt_t, V_pfi_buffer, V_pfi_buffer_cnt, &size2,
	    NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
		printf("%s: cannot set %d new addresses into table %s: %d\n",
		    __func__, V_pfi_buffer_cnt, kt->pfrkt_name, e);
}

static void
pfi_instance_add(struct ifnet *ifp, int net, int flags)
{
	struct ifaddr *ia;
	int got4 = 0, got6 = 0;
	int net2, af;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr == NULL)
			continue;
		af = ia->ifa_addr->sa_family;
		if (af != AF_INET && af != AF_INET6)
			continue;
		/*
		 * XXX: For point-to-point interfaces, (ifname:0) and IPv4,
		 *	jump over addresses without a proper route to work
		 *	around a problem with ppp not fully removing the
		 *	address used during IPCP.
		 */
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
		    !(ia->ifa_flags & IFA_ROUTE) &&
		    (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) &&
		    !(ifp->if_flags & IFF_BROADCAST))
			continue;
		if ((flags & PFI_AFLAG_PEER) &&
		    !(ifp->if_flags & IFF_POINTOPOINT))
			continue;
		if ((flags & (PFI_AFLAG_NETWORK | PFI_AFLAG_NOALIAS)) &&
		    af == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(
		    &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
			continue;
		if (flags & PFI_AFLAG_NOALIAS) {
			if (af == AF_INET && got4)
				continue;
			if (af == AF_INET6 && got6)
				continue;
		}
		if (af == AF_INET)
			got4 = 1;
		else if (af == AF_INET6)
			got6 = 1;
		net2 = net;
		if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
			if (af == AF_INET)
				net2 = pfi_unmask(&((struct sockaddr_in *)
				    ia->ifa_netmask)->sin_addr);
			else if (af == AF_INET6)
				net2 = pfi_unmask(&((struct sockaddr_in6 *)
				    ia->ifa_netmask)->sin6_addr);
		}
		if (af == AF_INET && net2 > 32)
			net2 = 32;
		if (flags & PFI_AFLAG_BROADCAST)
			pfi_address_add(ia->ifa_broadaddr, af, net2);
		else if (flags & PFI_AFLAG_PEER)
			pfi_address_add(ia->ifa_dstaddr, af, net2);
		else
			pfi_address_add(ia->ifa_addr, af, net2);
	}
}

static void
pfi_address_add(struct sockaddr *sa, int af, int net)
{
	struct pfr_addr *p;
	int i;

	if (V_pfi_buffer_cnt >= V_pfi_buffer_max) {
		int new_max = V_pfi_buffer_max * 2;

		if (new_max > PFI_BUFFER_MAX) {
			printf("%s: address buffer full (%d/%d)\n", __func__,
			    V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		p = malloc(new_max * sizeof(*V_pfi_buffer), PFI_MTYPE,
		    M_NOWAIT);
		if (p == NULL) {
			printf("%s: no memory to grow buffer (%d/%d)\n",
			    __func__, V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		memcpy(p, V_pfi_buffer, V_pfi_buffer_max * sizeof(*V_pfi_buffer));
		/* no need to zero buffer */
		free(V_pfi_buffer, PFI_MTYPE);
		V_pfi_buffer = p;
		V_pfi_buffer_max = new_max;
	}
	if (af == AF_INET && net > 32)
		net = 128;
	p = V_pfi_buffer + V_pfi_buffer_cnt++;
	memset(p, 0, sizeof(*p));
	p->pfra_af = af;
	p->pfra_net = net;
	if (af == AF_INET)
		p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
	else if (af == AF_INET6) {
		p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
			p->pfra_ip6addr.s6_addr16[1] = 0;
	}
	/* mask network address bits */
	if (net < 128)
		((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
	for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
		((caddr_t)p)[i] = 0;
}

void
pfi_dynaddr_remove(struct pfi_dynaddr *dyn)
{

	KASSERT(dyn->pfid_kif != NULL, ("%s: null pfid_kif", __func__));
	KASSERT(dyn->pfid_kt != NULL, ("%s: null pfid_kt", __func__));

	TAILQ_REMOVE(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	pfi_kkif_unref(dyn->pfid_kif);
	pfr_detach_table(dyn->pfid_kt);
	free(dyn, PFI_MTYPE);
}

void
pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
{

	KASSERT(aw->type == PF_ADDR_DYNIFTL,
	    ("%s: type %u", __func__, aw->type));

	if (aw->p.dyn == NULL || aw->p.dyn->pfid_kif == NULL)
		return;
	aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
}

static int
pfi_kkif_compare(struct pfi_kkif *p, struct pfi_kkif *q)
{
	return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
}
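
/*
 * Sum the per-interface packet and byte counters for the interface or group
 * named by 'name' into the pf_status arrays; when pfs is NULL the
 * per-interface counters are cleared instead.
 */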
void
pfi_update_status(const char *name, struct pf_status *pfs)
{
	struct pfi_kkif *p;
	struct pfi_kif_cmp key;
	struct ifg_member p_member, *ifgm;
	CK_STAILQ_HEAD(, ifg_member) ifg_members;
	int i, j, k;

	if (pfs) {
		memset(pfs->pcounters, 0, sizeof(pfs->pcounters));
		memset(pfs->bcounters, 0, sizeof(pfs->bcounters));
	}

	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
	p = RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&key);
	if (p == NULL) {
		return;
	}

	if (p->pfik_group != NULL) {
		memcpy(&ifg_members, &p->pfik_group->ifg_members,
		    sizeof(ifg_members));
	} else {
		/* build a temporary list for p only */
		memset(&p_member, 0, sizeof(p_member));
		p_member.ifgm_ifp = p->pfik_ifp;
		CK_STAILQ_INIT(&ifg_members);
		CK_STAILQ_INSERT_TAIL(&ifg_members, &p_member, ifgm_next);
	}
	CK_STAILQ_FOREACH(ifgm, &ifg_members, ifgm_next) {
		if (ifgm->ifgm_ifp == NULL || ifgm->ifgm_ifp->if_pf_kif == NULL)
			continue;
		p = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;

		/* just clear statistics */
		if (pfs == NULL) {
			pf_kkif_zero(p);
			continue;
		}
		for (i = 0; i < 2; i++)
			for (j = 0; j < 2; j++)
				for (k = 0; k < 2; k++) {
					pfs->pcounters[i][j][k] +=
					    pf_counter_u64_fetch(&p->pfik_packets[i][j][k]);
					pfs->bcounters[i][j] +=
					    pf_counter_u64_fetch(&p->pfik_bytes[i][j][k]);
				}
	}
}

static void
pf_kkif_to_kif(struct pfi_kkif *kkif, struct pfi_kif *kif)
{

	memset(kif, 0, sizeof(*kif));
	strlcpy(kif->pfik_name, kkif->pfik_name, sizeof(kif->pfik_name));
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				kif->pfik_packets[i][j][k] =
				    pf_counter_u64_fetch(&kkif->pfik_packets[i][j][k]);
				kif->pfik_bytes[i][j][k] =
				    pf_counter_u64_fetch(&kkif->pfik_bytes[i][j][k]);
			}
		}
	}
	kif->pfik_flags = kkif->pfik_flags;
	kif->pfik_tzero = kkif->pfik_tzero;
	kif->pfik_rulerefs = kkif->pfik_rulerefs;
	/*
	 * Userspace relies on this pointer to decide if this is a group or
	 * not. We don't want to share the actual pointer, because it's
	 * useless to userspace and leaks kernel memory layout information.
	 * So instead we provide 0xfeedc0de as 'true' and NULL as 'false'.
	 */
	kif->pfik_group =
	    kkif->pfik_group ? (struct ifg_group *)0xfeedc0de : NULL;
}
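
/*
 * Export the interface database to the ioctl path: convert kifs matching
 * 'name' into struct pfi_kif records in buf, limited by *size, and update
 * *size accordingly.
 */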
void
pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *nextp;
	int n = 0;

	NET_EPOCH_ENTER(et);
	for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
		if (pfi_skip_if(name, p))
			continue;
		if (*size <= n++)
			break;
		if (!p->pfik_tzero)
			p->pfik_tzero = time_second;
		pf_kkif_to_kif(p, buf++);
		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
	}
	*size = n;
	NET_EPOCH_EXIT(et);
}

static int
pfi_skip_if(const char *filter, struct pfi_kkif *p)
{
	struct ifg_list *i;
	int n;

	NET_EPOCH_ASSERT();

	if (filter == NULL || !*filter)
		return (0);
	if (!strcmp(p->pfik_name, filter))
		return (0);	/* exact match */
	n = strlen(filter);
	if (n < 1 || n >= IFNAMSIZ)
		return (1);	/* sanity check */
	if (filter[n-1] >= '0' && filter[n-1] <= '9')
		return (1);	/* group names may not end in a digit */
	if (p->pfik_ifp == NULL)
		return (1);
	CK_STAILQ_FOREACH(i, &p->pfik_ifp->if_groups, ifgl_next)
		if (!strncmp(i->ifgl_group->ifg_group, filter, IFNAMSIZ))
			return (0);	/* iface is in group "filter" */
	return (1);
}

int
pfi_set_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *kif;

	kif = pf_kkif_create(M_NOWAIT);
	if (kif == NULL)
		return (ENOMEM);

	NET_EPOCH_ENTER(et);

	kif = pfi_kkif_attach(kif, name);

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags |= flags;
	}
	NET_EPOCH_EXIT(et);
	return (0);
}

int
pfi_clear_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *tmp;

	NET_EPOCH_ENTER(et);
	RB_FOREACH_SAFE(p, pfi_ifhead, &V_pfi_ifs, tmp) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags &= ~flags;

		if (p->pfik_ifp == NULL && p->pfik_group == NULL &&
		    p->pfik_flags == 0 && p->pfik_rulerefs == 0) {
			/* Delete this kif. */
			RB_REMOVE(pfi_ifhead, &V_pfi_ifs, p);
			pf_kkif_free(p);
		}
	}
	NET_EPOCH_EXIT(et);
	return (0);
}

/* from pf_print_state.c */
static int
pfi_unmask(void *addr)
{
	struct pf_addr *m = addr;
	int i = 31, j = 0, b = 0;
	u_int32_t tmp;

	while (j < 4 && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	if (j < 4) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}
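
/*
 * Interface and group event handlers, registered in pfi_initialize().  Each
 * handler returns early while V_pf_vnet_active is zero to avoid racing
 * against vnet teardown.
 */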
static void
pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	kif = pf_kkif_create(M_NOWAIT);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifnet(ifp, kif);
#ifdef ALTQ
	pf_altq_ifnet_event(ifp, 0);
#endif
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}

static void
pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif = (struct pfi_kkif *)ifp->if_pf_kif;

	if (pfsync_detach_ifnet_ptr)
		pfsync_detach_ifnet_ptr(ifp);

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;
	pfi_kkif_update(kif);

	if (kif->pfik_ifp)
		if_rele(kif->pfik_ifp);

	kif->pfik_ifp = NULL;
	ifp->if_pf_kif = NULL;
#ifdef ALTQ
	pf_altq_ifnet_event(ifp, 1);
#endif
	pfi_kkif_remove_if_unref(kif);

	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}

static void
pfi_attach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifgroup(ifg, kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}

static void
pfi_change_group_event(void *arg __unused, char *gname)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;
	kif = pfi_kkif_attach(kif, gname);
	pfi_kkif_update(kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}

static void
pfi_detach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct pfi_kkif *kif = (struct pfi_kkif *)ifg->ifg_pf_kif;

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	V_pfi_update++;

	kif->pfik_group = NULL;
	ifg->ifg_pf_kif = NULL;

	pfi_kkif_remove_if_unref(kif);
	PF_RULES_WUNLOCK();
}

static void
pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	KASSERT(ifp, ("ifp == NULL"));

	if (ifp->if_pf_kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	if (ifp->if_pf_kif) {
		struct epoch_tracker et;

		V_pfi_update++;
		NET_EPOCH_ENTER(et);
		pfi_kkif_update(ifp->if_pf_kif);
		NET_EPOCH_EXIT(et);
	}
	PF_RULES_WUNLOCK();
}