1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2003 Cedric Berger
6 * Copyright (c) 2005 Henning Brauer <henning@openbsd.org>
7 * Copyright (c) 2005 Ryan McBride <mcbride@openbsd.org>
8 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * - Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $OpenBSD: pf_if.c,v 1.54 2008/06/14 16:55:28 mk Exp $
36 */
37
38 #include <sys/cdefs.h>
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41
42 #include <sys/param.h>
43 #include <sys/kernel.h>
44 #include <sys/eventhandler.h>
45 #include <sys/lock.h>
46 #include <sys/mbuf.h>
47 #include <sys/socket.h>
48
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_private.h>
52 #include <net/vnet.h>
53 #include <net/pfvar.h>
54 #include <net/route.h>
55
56 VNET_DEFINE(struct pfi_kkif *, pfi_all);
57 VNET_DEFINE_STATIC(long, pfi_update);
58 #define V_pfi_update VNET(pfi_update)
59 #define PFI_BUFFER_MAX 0x10000
60
61 VNET_DECLARE(int, pf_vnet_active);
62 #define V_pf_vnet_active VNET(pf_vnet_active)
63
64 VNET_DEFINE_STATIC(struct pfr_addr *, pfi_buffer);
65 VNET_DEFINE_STATIC(int, pfi_buffer_cnt);
66 VNET_DEFINE_STATIC(int, pfi_buffer_max);
67 #define V_pfi_buffer VNET(pfi_buffer)
68 #define V_pfi_buffer_cnt VNET(pfi_buffer_cnt)
69 #define V_pfi_buffer_max VNET(pfi_buffer_max)
70
71 #ifdef PF_WANT_32_TO_64_COUNTER
72 VNET_DEFINE(struct allkiflist_head, pf_allkiflist);
73 VNET_DEFINE(size_t, pf_allkifcount);
74 VNET_DEFINE(struct pfi_kkif *, pf_kifmarker);
75 #endif
76
77 eventhandler_tag pfi_attach_cookie;
78 eventhandler_tag pfi_detach_cookie;
79 eventhandler_tag pfi_attach_group_cookie;
80 eventhandler_tag pfi_change_group_cookie;
81 eventhandler_tag pfi_detach_group_cookie;
82 eventhandler_tag pfi_ifaddr_event_cookie;
83
84 static void pfi_attach_ifnet(struct ifnet *, struct pfi_kkif *);
85 static void pfi_attach_ifgroup(struct ifg_group *, struct pfi_kkif *);
86
87 static void pfi_kkif_update(struct pfi_kkif *);
88 static void pfi_dynaddr_update(struct pfi_dynaddr *dyn);
89 static void pfi_table_update(struct pfr_ktable *, struct pfi_kkif *, uint8_t,
90 int);
91 static void pfi_instance_add(struct ifnet *, uint8_t, int);
92 static void pfi_address_add(struct sockaddr *, sa_family_t, uint8_t);
93 static int pfi_kkif_compare(struct pfi_kkif *, struct pfi_kkif *);
94 static int pfi_skip_if(const char *, struct pfi_kkif *);
95 static int pfi_unmask(void *);
96 static void pfi_attach_ifnet_event(void * __unused, struct ifnet *);
97 static void pfi_detach_ifnet_event(void * __unused, struct ifnet *);
98 static void pfi_attach_group_event(void * __unused, struct ifg_group *);
99 static void pfi_change_group_event(void * __unused, char *);
100 static void pfi_detach_group_event(void * __unused, struct ifg_group *);
101 static void pfi_ifaddr_event(void * __unused, struct ifnet *);
102
103 RB_HEAD(pfi_ifhead, pfi_kkif);
104 static RB_PROTOTYPE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
105 static RB_GENERATE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
106 VNET_DEFINE_STATIC(struct pfi_ifhead, pfi_ifs);
107 #define V_pfi_ifs VNET(pfi_ifs)
108
109 #define PFI_BUFFER_MAX 0x10000
110 MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");
111
112 LIST_HEAD(pfi_list, pfi_kkif);
113 VNET_DEFINE_STATIC(struct pfi_list, pfi_unlinked_kifs);
114 #define V_pfi_unlinked_kifs VNET(pfi_unlinked_kifs)
115 static struct mtx pfi_unlnkdkifs_mtx;
116 MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
117 MTX_DEF);
118
void
pfi_initialize_vnet(void)
{
	struct pfi_list kifs = LIST_HEAD_INITIALIZER();
	struct epoch_tracker et;
	struct pfi_kkif *kif;
	struct ifg_group *ifg;
	struct ifnet *ifp;
	int nkifs;

	/*
	 * Per-VNET setup: allocate the shared address scratch buffer and
	 * create a pfi_kkif for every already-existing interface group and
	 * interface, plus the special "all" kif.
	 */
	V_pfi_buffer_max = 64;
	V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	nkifs = 1;	/* one for V_pfi_all */
	IFNET_RLOCK();
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		nkifs++;
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		nkifs++;

	/*
	 * Preallocate all kifs with M_WAITOK before taking the rules lock,
	 * so no allocation can sleep while PF_RULES_WLOCK is held below.
	 */
	for (int n = 0; n < nkifs; n++) {
		kif = pf_kkif_create(M_WAITOK);
		LIST_INSERT_HEAD(&kifs, kif, pfik_list);
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	/* First preallocated kif becomes the wildcard "all" kif. */
	kif = LIST_FIRST(&kifs);
	LIST_REMOVE(kif, pfik_list);
	V_pfi_all = pfi_kkif_attach(kif, IFG_ALL);
	/* Consume one preallocated kif per group, then one per interface. */
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifgroup(ifg, kif);
	}
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifnet(ifp, kif);
	}
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();

	/* Every preallocated kif must have been consumed. */
	MPASS(LIST_EMPTY(&kifs));
}
166
void
pfi_initialize(void)
{

	/*
	 * Global (not per-VNET) setup: subscribe to interface and interface
	 * group lifecycle events so pf can track arrivals, departures,
	 * group membership changes and address changes.
	 */
	pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
	    pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
	    pfi_attach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
	    pfi_change_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
	    pfi_detach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
	    pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
184
185 void
pfi_cleanup_vnet(void)186 pfi_cleanup_vnet(void)
187 {
188 struct pfi_kkif *kif;
189
190 PF_RULES_WASSERT();
191
192 V_pfi_all = NULL;
193 while ((kif = RB_MIN(pfi_ifhead, &V_pfi_ifs))) {
194 RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);
195 if (kif->pfik_group)
196 kif->pfik_group->ifg_pf_kif = NULL;
197 if (kif->pfik_ifp) {
198 if_rele(kif->pfik_ifp);
199 kif->pfik_ifp->if_pf_kif = NULL;
200 }
201 pf_kkif_free(kif);
202 }
203
204 mtx_lock(&pfi_unlnkdkifs_mtx);
205 while ((kif = LIST_FIRST(&V_pfi_unlinked_kifs))) {
206 LIST_REMOVE(kif, pfik_list);
207 pf_kkif_free(kif);
208 }
209 mtx_unlock(&pfi_unlnkdkifs_mtx);
210
211 free(V_pfi_buffer, PFI_MTYPE);
212 }
213
void
pfi_cleanup(void)
{

	/* Global teardown: undo every registration made in pfi_initialize(). */
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
	EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
	EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
	EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
	EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
}
225
/*
 * Allocate and initialize a pfi_kkif.  'flags' is an M_WAITOK/M_NOWAIT
 * malloc flag; with M_NOWAIT the function may return NULL.  On any counter
 * initialization failure the partially built kif is freed and NULL is
 * returned.
 */
struct pfi_kkif*
pf_kkif_create(int flags)
{
	struct pfi_kkif *kif;
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	kif = malloc(sizeof(*kif), PFI_MTYPE, flags | M_ZERO);
	if (! kif)
		return (kif);

	/* Initialize the 2x2x2 packet and byte counter arrays. */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				if (pf_counter_u64_init(&kif->pfik_packets[i][j][k], flags) != 0) {
					pf_kkif_free(kif);
					return (NULL);
				}

				if (pf_counter_u64_init(&kif->pfik_bytes[i][j][k], flags) != 0) {
					pf_kkif_free(kif);
					return (NULL);
				}
			}
		}
	}

#ifdef PF_WANT_32_TO_64_COUNTER
	/*
	 * Insert into the all-kif list, taking the rules write lock only
	 * if the caller does not already own it.
	 */
	wowned = PF_RULES_WOWNED();
	if (!wowned)
		PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, kif, pfik_allkiflist);
	V_pf_allkifcount++;
	if (!wowned)
		PF_RULES_WUNLOCK();
#endif

	return (kif);
}
266
/*
 * Release a pfi_kkif: unlink it from the all-kif list (if compiled in),
 * tear down its counters and free it.  NULL is accepted and ignored.
 */
void
pf_kkif_free(struct pfi_kkif *kif)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (! kif)
		return;

#ifdef INVARIANTS
	/* A kif still bound to an ifnet must be that ifnet's kif (or unset). */
	if (kif->pfik_ifp) {
		struct ifnet *ifp = kif->pfik_ifp;
		MPASS(ifp->if_pf_kif == NULL || ifp->if_pf_kif == kif);
	}
#endif

#ifdef PF_WANT_32_TO_64_COUNTER
	/* Unlink from the all-kif list; lock only if not already owned. */
	wowned = PF_RULES_WOWNED();
	if (!wowned)
		PF_RULES_WLOCK();
	LIST_REMOVE(kif, pfik_allkiflist);
	V_pf_allkifcount--;
	if (!wowned)
		PF_RULES_WUNLOCK();
#endif

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				pf_counter_u64_deinit(&kif->pfik_packets[i][j][k]);
				pf_counter_u64_deinit(&kif->pfik_bytes[i][j][k]);
			}
		}
	}

	free(kif, PFI_MTYPE);
}
305
306 void
pf_kkif_zero(struct pfi_kkif * kif)307 pf_kkif_zero(struct pfi_kkif *kif)
308 {
309
310 for (int i = 0; i < 2; i++) {
311 for (int j = 0; j < 2; j++) {
312 for (int k = 0; k < 2; k++) {
313 pf_counter_u64_zero(&kif->pfik_packets[i][j][k]);
314 pf_counter_u64_zero(&kif->pfik_bytes[i][j][k]);
315 }
316 }
317 }
318 kif->pfik_tzero = time_second;
319 }
320
321 struct pfi_kkif *
pfi_kkif_find(const char * kif_name)322 pfi_kkif_find(const char *kif_name)
323 {
324 struct pfi_kif_cmp s;
325
326 PF_RULES_ASSERT();
327
328 memset(&s, 0, sizeof(s));
329 strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));
330
331 return (RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&s));
332 }
333
/*
 * Attach (register) a kif under the given name.  If a kif with that name
 * already exists, the caller's preallocated 'kif' is freed and the
 * existing one is returned; otherwise 'kif' is initialized, inserted into
 * the tree and returned.  Requires the rules write lock.
 */
struct pfi_kkif *
pfi_kkif_attach(struct pfi_kkif *kif, const char *kif_name)
{
	struct pfi_kkif *kif1;

	PF_RULES_WASSERT();
	KASSERT(kif != NULL, ("%s: null kif", __func__));

	kif1 = pfi_kkif_find(kif_name);
	if (kif1 != NULL) {
		/* Name already registered; discard the spare. */
		pf_kkif_free(kif);
		return (kif1);
	}

	pf_kkif_zero(kif);
	strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
	/*
	 * It seems that the value of time_second is in uninitialized state
	 * when pf sets interface statistics clear time in boot phase if pf
	 * was statically linked to kernel.  Instead of setting the bogus
	 * time value have pfi_get_ifaces handle this case.  In
	 * pfi_get_ifaces it uses time_second if it sees the time is 0.
	 */
	kif->pfik_tzero = time_second > 1 ? time_second : 0;
	TAILQ_INIT(&kif->pfik_dynaddrs);

	if (!strcmp(kif->pfik_name, "any")) {
		/* both so it works in the ioctl and the regular case */
		kif->pfik_flags |= PFI_IFLAG_ANY;
	}

	RB_INSERT(pfi_ifhead, &V_pfi_ifs, kif);

	return (kif);
}
369
/* Take a rule reference on a kif.  Requires the rules write lock. */
void
pfi_kkif_ref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	kif->pfik_rulerefs++;
}
377
/*
 * Move a kif to the unlinked list if nothing references it anymore:
 * no rule references, no ifnet, no group, no flags and not V_pfi_all.
 * Requires the rules write lock.
 */
static void
pfi_kkif_remove_if_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	if (kif->pfik_rulerefs > 0)
		return;

	/* kif referencing an existing ifnet or group or holding flags should
	 * exist. */
	if (kif->pfik_ifp != NULL || kif->pfik_group != NULL ||
	    kif == V_pfi_all || kif->pfik_flags != 0)
		return;

	/*
	 * We can get here in at least two distinct paths:
	 * - when the struct ifnet is removed, via pfi_detach_ifnet_event()
	 * - when a rule referencing us is removed, via pfi_kkif_unref().
	 * These two events can race against each other, leading us to free this kif
	 * twice. That leads to a loop in V_pfi_unlinked_kifs, and an eventual
	 * deadlock.
	 *
	 * Avoid this by making sure we only ever insert the kif into
	 * V_pfi_unlinked_kifs once.
	 * If we don't find it in V_pfi_ifs it's already been removed. Check that it
	 * exists in V_pfi_unlinked_kifs.
	 */
	if (! RB_FIND(pfi_ifhead, &V_pfi_ifs, kif)) {
#ifdef INVARIANTS
		/* Debug build: assert it really is on the unlinked list. */
		struct pfi_kkif *tmp;
		bool found = false;
		mtx_lock(&pfi_unlnkdkifs_mtx);
		LIST_FOREACH(tmp, &V_pfi_unlinked_kifs, pfik_list) {
			if (tmp == kif) {
				found = true;
				break;
			}
		}
		mtx_unlock(&pfi_unlnkdkifs_mtx);
		MPASS(found);
#endif
		return;
	}
	RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);

	/* Mark so pfi_kkif_purge() gives it one grace round before freeing. */
	kif->pfik_flags |= PFI_IFLAG_REFS;

	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_INSERT_HEAD(&V_pfi_unlinked_kifs, kif, pfik_list);
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}
430
/*
 * Drop a rule reference; if it was the last reason the kif existed,
 * unlink it for later garbage collection.  Requires the rules write lock.
 */
void
pfi_kkif_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	KASSERT(kif->pfik_rulerefs > 0, ("%s: %p has zero refs", __func__, kif));

	kif->pfik_rulerefs--;

	pfi_kkif_remove_if_unref(kif);
}
442
443 void
pfi_kkif_purge(void)444 pfi_kkif_purge(void)
445 {
446 struct pfi_kkif *kif, *kif1;
447
448 /*
449 * Do naive mark-and-sweep garbage collecting of old kifs.
450 * Reference flag is raised by pf_purge_expired_states().
451 */
452 mtx_lock(&pfi_unlnkdkifs_mtx);
453 LIST_FOREACH_SAFE(kif, &V_pfi_unlinked_kifs, pfik_list, kif1) {
454 if (!(kif->pfik_flags & PFI_IFLAG_REFS)) {
455 LIST_REMOVE(kif, pfik_list);
456 pf_kkif_free(kif);
457 } else
458 kif->pfik_flags &= ~PFI_IFLAG_REFS;
459 }
460 mtx_unlock(&pfi_unlnkdkifs_mtx);
461 }
462
/*
 * Decide whether a packet's interface matches a rule's interface
 * specification.  Returns 1 on match, 0 otherwise.  A NULL rule_kif
 * (rule with no interface) matches everything; a group kif matches any
 * member interface; an "any" kif matches every non-loopback interface.
 */
int
pfi_kkif_match(struct pfi_kkif *rule_kif, struct pfi_kkif *packet_kif)
{
	struct ifg_list	*p;

	NET_EPOCH_ASSERT();

	MPASS(packet_kif != NULL);
	MPASS(packet_kif->pfik_ifp != NULL);

	if (rule_kif == NULL || rule_kif == packet_kif)
		return (1);

	if (rule_kif->pfik_group != NULL) {
		/* Match if the packet's ifnet is a member of the rule's group. */
		CK_STAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
			if (p->ifgl_group == rule_kif->pfik_group)
				return (1);
	}

	/* "any" matches all interfaces except loopback. */
	if (rule_kif->pfik_flags & PFI_IFLAG_ANY && packet_kif->pfik_ifp &&
	    !(packet_kif->pfik_ifp->if_flags & IFF_LOOPBACK))
		return (1);

	return (0);
}
488
/*
 * Bind a kif to an ifnet: register it under the interface name, take an
 * ifnet reference, cross-link the two and refresh dependent dynaddrs and
 * tables.  Requires the rules write lock.
 */
static void
pfi_attach_ifnet(struct ifnet *ifp, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;	/* invalidate cached dynaddr table state */
	kif = pfi_kkif_attach(kif, ifp->if_xname);
	if_ref(ifp);
	kif->pfik_ifp = ifp;
	ifp->if_pf_kif = kif;
	pfi_kkif_update(kif);
}
502
/*
 * Bind a kif to an interface group under the group's name and cross-link
 * the two.  Requires the rules write lock.
 */
static void
pfi_attach_ifgroup(struct ifg_group *ifg, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;	/* invalidate cached dynaddr table state */
	kif = pfi_kkif_attach(kif, ifg->ifg_group);
	kif->pfik_group = ifg;
	ifg->ifg_pf_kif = kif;
}
514
515 int
pfi_match_addr(struct pfi_dynaddr * dyn,struct pf_addr * a,sa_family_t af)516 pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
517 {
518 switch (af) {
519 #ifdef INET
520 case AF_INET:
521 switch (dyn->pfid_acnt4) {
522 case 0:
523 return (0);
524 case 1:
525 return (pf_match_addr(0, &dyn->pfid_addr4,
526 &dyn->pfid_mask4, a, AF_INET));
527 default:
528 return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
529 }
530 break;
531 #endif /* INET */
532 #ifdef INET6
533 case AF_INET6:
534 switch (dyn->pfid_acnt6) {
535 case 0:
536 return (0);
537 case 1:
538 return (pf_match_addr(0, &dyn->pfid_addr6,
539 &dyn->pfid_mask6, a, AF_INET6));
540 default:
541 return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
542 }
543 break;
544 #endif /* INET6 */
545 default:
546 return (0);
547 }
548 }
549
/*
 * Set up a dynamic address ("(ifname)" style rule address): attach a kif
 * for the named interface (or IFG_ALL for "self"), create the backing
 * table whose name encodes the modifier flags and prefix length, and link
 * the dynaddr into the kif.  Returns 0 on success or ENOMEM; on failure
 * all partially acquired resources are released.  Requires the rules
 * write lock.
 */
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct epoch_tracker et;
	struct pfi_dynaddr *dyn;
	char tblname[PF_TABLE_NAME_SIZE];
	struct pf_kruleset *ruleset = NULL;
	struct pfi_kkif *kif;
	int rv = 0;

	PF_RULES_WASSERT();
	KASSERT(aw->type == PF_ADDR_DYNIFTL, ("%s: type %u",
	    __func__, aw->type));
	KASSERT(aw->p.dyn == NULL, ("%s: dyn is %p", __func__, aw->p.dyn));

	if ((dyn = malloc(sizeof(*dyn), PFI_MTYPE, M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	if ((kif = pf_kkif_create(M_NOWAIT)) == NULL) {
		free(dyn, PFI_MTYPE);
		return (ENOMEM);
	}

	/* "self" is an alias for the wildcard "all" interface group. */
	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kkif_attach(kif, IFG_ALL);
	else
		dyn->pfid_kif = pfi_kkif_attach(kif, aw->v.ifname);
	kif = NULL;	/* ownership passed to pfi_kkif_attach() */
	pfi_kkif_ref(dyn->pfid_kif);

	/* 128 means "full address"; a v4 /32 is normalized to that. */
	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	/* Encode the modifier flags and any prefix into the table name. */
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_kruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	/* Populate the table with the interface's current addresses. */
	NET_EPOCH_ENTER(et);
	pfi_kkif_update(dyn->pfid_kif);
	NET_EPOCH_EXIT(et);

	return (0);

_bad:
	/* Unwind in reverse acquisition order. */
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_kruleset(ruleset);
	pfi_kkif_unref(dyn->pfid_kif);
	free(dyn, PFI_MTYPE);

	return (rv);
}
627
/*
 * Propagate state for a kif after a change: refresh all of its dynamic
 * addresses, push its flags to group members, and recurse into every
 * group kif this interface belongs to.  Requires net epoch and the rules
 * write lock.
 */
static void
pfi_kkif_update(struct pfi_kkif *kif)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;
	struct pfi_dynaddr *p;
	struct pfi_kkif *tmpkif;

	NET_EPOCH_ASSERT();
	PF_RULES_WASSERT();

	/* update all dynaddr */
	TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
		pfi_dynaddr_update(p);

	/* Apply group flags to new members. */
	if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members,
		    ifgm_next) {
			tmpkif = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;
			if (tmpkif == NULL)
				continue;

			tmpkif->pfik_flags |= kif->pfik_flags;
		}
	}

	/* again for all groups kif is member of */
	if (kif->pfik_ifp != NULL) {
		CK_STAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
			if (ifgl->ifgl_group->ifg_pf_kif) {
				/* recurse so the group's dynaddrs refresh too */
				pfi_kkif_update((struct pfi_kkif *)
				    ifgl->ifgl_group->ifg_pf_kif);
			}
	}
}
664
/*
 * Bring one dynamic address up to date.  The pfrkt_larg generation
 * counter against V_pfi_update avoids rebuilding the shared backing
 * table more than once per update cycle.  Requires the rules write lock.
 */
static void
pfi_dynaddr_update(struct pfi_dynaddr *dyn)
{
	struct pfi_kkif *kif;
	struct pfr_ktable *kt;

	PF_RULES_WASSERT();
	KASSERT(dyn && dyn->pfid_kif && dyn->pfid_kt,
	    ("%s: bad argument", __func__));

	kif = dyn->pfid_kif;
	kt = dyn->pfid_kt;

	if (kt->pfrkt_larg != V_pfi_update) {
		/* this table needs to be brought up-to-date */
		pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
		kt->pfrkt_larg = V_pfi_update;
	}
	pfr_dynaddr_update(kt, dyn);
}
685
/*
 * Rebuild the backing table of a dynamic address: collect the current
 * addresses of the kif's interface (or of every member if it is a group
 * kif) into V_pfi_buffer, then replace the table contents wholesale.
 */
static void
pfi_table_update(struct pfr_ktable *kt, struct pfi_kkif *kif, uint8_t net,
    int flags)
{
	int e, size2 = 0;
	struct ifg_member *ifgm;

	NET_EPOCH_ASSERT();

	/* Reset the shared scratch buffer before collecting addresses. */
	V_pfi_buffer_cnt = 0;

	if (kif->pfik_ifp != NULL)
		pfi_instance_add(kif->pfik_ifp, net, flags);
	else if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
			pfi_instance_add(ifgm->ifgm_ifp, net, flags);
	}

	if ((e = pfr_set_addrs(&kt->pfrkt_t, V_pfi_buffer, V_pfi_buffer_cnt, &size2,
	    NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
		printf("%s: cannot set %d new addresses into table %s: %d\n",
		    __func__, V_pfi_buffer_cnt, kt->pfrkt_name, e);
}
709
/*
 * Walk one interface's address list and append every address that passes
 * the PFI_AFLAG_* filters to V_pfi_buffer.  'net' selects the prefix
 * length, with 128 meaning "use the full address" (or, with
 * PFI_AFLAG_NETWORK, "derive from the interface netmask").
 */
static void
pfi_instance_add(struct ifnet *ifp, uint8_t net, int flags)
{
	struct ifaddr *ia;
	int got4 = 0, got6 = 0;
	sa_family_t af;
	uint8_t net2;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr == NULL)
			continue;
		af = ia->ifa_addr->sa_family;
		/* Only IPv4/IPv6 addresses are relevant to pf tables. */
		if (af != AF_INET && af != AF_INET6)
			continue;
		/*
		 * XXX: For point-to-point interfaces, (ifname:0) and IPv4,
		 *	jump over addresses without a proper route to work
		 *	around a problem with ppp not fully removing the
		 *	address used during IPCP.
		 */
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
		    !(ia->ifa_flags & IFA_ROUTE) &&
		    (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
			continue;
		/* :broadcast applies only to IPv4 broadcast-capable ifs. */
		if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) &&
		    !(ifp->if_flags & IFF_BROADCAST))
			continue;
		/* :peer makes sense only on point-to-point interfaces. */
		if ((flags & PFI_AFLAG_PEER) &&
		    !(ifp->if_flags & IFF_POINTOPOINT))
			continue;
		/* :network and :0 skip IPv6 link-local addresses. */
		if ((flags & (PFI_AFLAG_NETWORK | PFI_AFLAG_NOALIAS)) &&
		    af == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(
		    &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
			continue;
		if (flags & PFI_AFLAG_NOALIAS) {
			/* :0 keeps only the first address per family. */
			if (af == AF_INET && got4)
				continue;
			if (af == AF_INET6 && got6)
				continue;
		}
		if (af == AF_INET)
			got4 = 1;
		else if (af == AF_INET6)
			got6 = 1;
		net2 = net;
		if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
			/* Derive the prefix length from the netmask. */
			if (af == AF_INET)
				net2 = pfi_unmask(&((struct sockaddr_in *)
				    ia->ifa_netmask)->sin_addr);
			else if (af == AF_INET6)
				net2 = pfi_unmask(&((struct sockaddr_in6 *)
				    ia->ifa_netmask)->sin6_addr);
		}
		/* Clamp IPv4 prefixes to at most /32. */
		if (af == AF_INET && net2 > 32)
			net2 = 32;
		if (flags & PFI_AFLAG_BROADCAST)
			pfi_address_add(ia->ifa_broadaddr, af, net2);
		else if (flags & PFI_AFLAG_PEER)
			pfi_address_add(ia->ifa_dstaddr, af, net2);
		else
			pfi_address_add(ia->ifa_addr, af, net2);
	}
}
778
/*
 * Append one address to V_pfi_buffer as a pfr_addr, growing the buffer
 * (doubling, capped at PFI_BUFFER_MAX) as needed.  On allocation failure
 * or overflow the address is dropped with a console message.  Host bits
 * beyond the prefix length are zeroed so table entries are canonical.
 */
static void
pfi_address_add(struct sockaddr *sa, sa_family_t af, uint8_t net)
{
	struct pfr_addr *p;
	int i;

	if (V_pfi_buffer_cnt >= V_pfi_buffer_max) {
		int new_max = V_pfi_buffer_max * 2;

		if (new_max > PFI_BUFFER_MAX) {
			printf("%s: address buffer full (%d/%d)\n", __func__,
			    V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		p = malloc(new_max * sizeof(*V_pfi_buffer), PFI_MTYPE,
		    M_NOWAIT);
		if (p == NULL) {
			printf("%s: no memory to grow buffer (%d/%d)\n",
			    __func__, V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		memcpy(p, V_pfi_buffer, V_pfi_buffer_max * sizeof(*V_pfi_buffer));
		/* no need to zero buffer */
		free(V_pfi_buffer, PFI_MTYPE);
		V_pfi_buffer = p;
		V_pfi_buffer_max = new_max;
	}
	/* 128 means "full address" internally; an over-long v4 prefix maps there. */
	if (af == AF_INET && net > 32)
		net = 128;
	p = V_pfi_buffer + V_pfi_buffer_cnt++;
	memset(p, 0, sizeof(*p));
	p->pfra_af = af;
	p->pfra_net = net;
	if (af == AF_INET)
		p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
	else if (af == AF_INET6) {
		p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		/* Strip the embedded scope id from scoped addresses. */
		if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
			p->pfra_ip6addr.s6_addr16[1] = 0;
	}
	/* mask network address bits */
	if (net < 128)
		((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
	for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
		((caddr_t)p)[i] = 0;
}
825
/*
 * Tear down a dynamic address: unlink it from its kif, drop the kif
 * reference, detach the backing table and free the dynaddr.
 */
void
pfi_dynaddr_remove(struct pfi_dynaddr *dyn)
{

	KASSERT(dyn->pfid_kif != NULL, ("%s: null pfid_kif", __func__));
	KASSERT(dyn->pfid_kt != NULL, ("%s: null pfid_kt", __func__));

	TAILQ_REMOVE(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	pfi_kkif_unref(dyn->pfid_kif);
	pfr_detach_table(dyn->pfid_kt);
	free(dyn, PFI_MTYPE);
}
838
/*
 * Export a dynamic address's current state to its userspace-visible
 * wrapper: dyncnt becomes the total tracked v4 + v6 address count.
 * A wrapper with no dynaddr attached is left untouched.
 */
void
pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
{

	KASSERT(aw->type == PF_ADDR_DYNIFTL,
	    ("%s: type %u", __func__, aw->type));

	if (aw->p.dyn == NULL || aw->p.dyn->pfid_kif == NULL)
		return;
	aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
}
850
851 static int
pfi_kkif_compare(struct pfi_kkif * p,struct pfi_kkif * q)852 pfi_kkif_compare(struct pfi_kkif *p, struct pfi_kkif *q)
853 {
854 return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
855 }
856
/*
 * Aggregate interface statistics for 'name' into 'pfs', covering every
 * member if 'name' is a group.  With pfs == NULL the matching kifs'
 * counters are cleared instead.
 */
void
pfi_update_status(const char *name, struct pf_status *pfs)
{
	struct pfi_kkif	*p;
	struct pfi_kif_cmp key;
	struct ifg_member p_member, *ifgm;
	CK_STAILQ_HEAD(, ifg_member) ifg_members;
	int i, j, k;

	if (pfs) {
		memset(pfs->pcounters, 0, sizeof(pfs->pcounters));
		memset(pfs->bcounters, 0, sizeof(pfs->bcounters));
	}

	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
	p = RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&key);
	if (p == NULL) {
		return;
	}

	if (p->pfik_group != NULL) {
		/* Iterate over every member of the group. */
		memcpy(&ifg_members, &p->pfik_group->ifg_members,
		    sizeof(ifg_members));
	} else {
		/* build a temporary list for p only */
		memset(&p_member, 0, sizeof(p_member));
		p_member.ifgm_ifp = p->pfik_ifp;
		CK_STAILQ_INIT(&ifg_members);
		CK_STAILQ_INSERT_TAIL(&ifg_members, &p_member, ifgm_next);
	}
	CK_STAILQ_FOREACH(ifgm, &ifg_members, ifgm_next) {
		if (ifgm->ifgm_ifp == NULL || ifgm->ifgm_ifp->if_pf_kif == NULL)
			continue;
		p = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;

		/* just clear statistics */
		if (pfs == NULL) {
			pf_kkif_zero(p);
			continue;
		}
		/*
		 * Sum the 2x2x2 counters; note bcounters is 2x2, so bytes
		 * are folded over the innermost index.
		 */
		for (i = 0; i < 2; i++)
			for (j = 0; j < 2; j++)
				for (k = 0; k < 2; k++) {
					pfs->pcounters[i][j][k] +=
					    pf_counter_u64_fetch(&p->pfik_packets[i][j][k]);
					pfs->bcounters[i][j] +=
					    pf_counter_u64_fetch(&p->pfik_bytes[i][j][k]);
				}
	}
}
907
/*
 * Convert a kernel kif into the userspace-facing pfi_kif structure,
 * flattening the counters to plain u64 values.
 */
static void
pf_kkif_to_kif(struct pfi_kkif *kkif, struct pfi_kif *kif)
{

	memset(kif, 0, sizeof(*kif));
	strlcpy(kif->pfik_name, kkif->pfik_name, sizeof(kif->pfik_name));
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				kif->pfik_packets[i][j][k] =
				    pf_counter_u64_fetch(&kkif->pfik_packets[i][j][k]);
				kif->pfik_bytes[i][j][k] =
				    pf_counter_u64_fetch(&kkif->pfik_bytes[i][j][k]);
			}
		}
	}
	kif->pfik_flags = kkif->pfik_flags;
	kif->pfik_tzero = kkif->pfik_tzero;
	kif->pfik_rulerefs = kkif->pfik_rulerefs;
	/*
	 * Userspace relies on this pointer to decide if this is a group or
	 * not. We don't want to share the actual pointer, because it's
	 * useless to userspace and leaks kernel memory layout information.
	 * So instead we provide 0xfeedcode as 'true' and NULL as 'false'.
	 */
	kif->pfik_group =
	    kkif->pfik_group ? (struct ifg_group *)0xfeedc0de : NULL;
}
936
937 void
pfi_get_ifaces(const char * name,struct pfi_kif * buf,int * size)938 pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
939 {
940 struct epoch_tracker et;
941 struct pfi_kkif *p, *nextp;
942 int n = 0;
943
944 NET_EPOCH_ENTER(et);
945 for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
946 nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
947 if (pfi_skip_if(name, p))
948 continue;
949 if (*size <= n++)
950 break;
951 if (!p->pfik_tzero)
952 p->pfik_tzero = time_second;
953 pf_kkif_to_kif(p, buf++);
954 nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
955 }
956 *size = n;
957 NET_EPOCH_EXIT(et);
958 }
959
/*
 * Return 0 if kif 'p' matches 'filter' (exact name match, or membership
 * in a group of that name), 1 if it should be skipped.  An empty or NULL
 * filter matches everything.
 */
static int
pfi_skip_if(const char *filter, struct pfi_kkif *p)
{
	struct ifg_list *i;
	int	n;

	NET_EPOCH_ASSERT();

	if (filter == NULL || !*filter)
		return (0);
	if (!strcmp(p->pfik_name, filter))
		return (0);	/* exact match */
	n = strlen(filter);
	if (n < 1 || n >= IFNAMSIZ)
		return (1);	/* sanity check */
	if (filter[n-1] >= '0' && filter[n-1] <= '9')
		return (1);	/* group names may not end in a digit */
	if (p->pfik_ifp == NULL)
		return (1);
	CK_STAILQ_FOREACH(i, &p->pfik_ifp->if_groups, ifgl_next)
		if (!strncmp(i->ifgl_group->ifg_group, filter, IFNAMSIZ))
			return (0); /* iface is in group "filter" */
	return (1);
}
984
/*
 * Set user-settable flags on every kif matching 'name', creating a kif
 * for the name first so the flags persist even if no interface currently
 * matches.  Returns 0 or ENOMEM.
 */
int
pfi_set_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif	*p, *kif;

	kif = pf_kkif_create(M_NOWAIT);
	if (kif == NULL)
		return (ENOMEM);

	NET_EPOCH_ENTER(et);

	/* NOTE(review): pfi_kkif_attach() asserts the rules write lock —
	 * presumably the ioctl caller holds it; verify against callers. */
	kif = pfi_kkif_attach(kif, name);

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags |= flags;
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
1007
/*
 * Clear user-settable flags on every kif matching 'name'.  A kif left
 * with no ifnet, no group, no flags and no rule references is deleted on
 * the spot.  Always returns 0.
 */
int
pfi_clear_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *tmp;

	NET_EPOCH_ENTER(et);
	RB_FOREACH_SAFE(p, pfi_ifhead, &V_pfi_ifs, tmp) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags &= ~flags;

		if (p->pfik_ifp == NULL && p->pfik_group == NULL &&
		    p->pfik_flags == 0 && p->pfik_rulerefs == 0) {
			/* Delete this kif. */
			RB_REMOVE(pfi_ifhead, &V_pfi_ifs, p);
			pf_kkif_free(p);
		}
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
1030
/* from pf_print_state.c */
/*
 * Compute the prefix length of a network mask held in a struct pf_addr:
 * count whole 0xffffffff words, then the leading one-bits of the first
 * partial word.  Works for both v4 (result <= 32) and v6 masks.
 */
static int
pfi_unmask(void *addr)
{
	struct pf_addr *m = addr;
	int i = 31, j = 0, b = 0;
	u_int32_t tmp;

	while (j < 4 && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	if (j < 4) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}
1050
1051 static void
pfi_attach_ifnet_event(void * arg __unused,struct ifnet * ifp)1052 pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
1053 {
1054 struct epoch_tracker et;
1055 struct pfi_kkif *kif;
1056
1057 if (V_pf_vnet_active == 0) {
1058 /* Avoid teardown race in the least expensive way. */
1059 return;
1060 }
1061 kif = pf_kkif_create(M_NOWAIT);
1062 NET_EPOCH_ENTER(et);
1063 PF_RULES_WLOCK();
1064 pfi_attach_ifnet(ifp, kif);
1065 #ifdef ALTQ
1066 pf_altq_ifnet_event(ifp, 0);
1067 #endif
1068 PF_RULES_WUNLOCK();
1069 NET_EPOCH_EXIT(et);
1070 }
1071
/*
 * ifnet_departure_event handler: detach the interface's kif, drop the
 * ifnet reference and queue the kif for garbage collection if nothing
 * else references it.
 */
static void
pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif = (struct pfi_kkif *)ifp->if_pf_kif;

	/* Let pfsync(4) react to the departure first, if loaded. */
	if (pfsync_detach_ifnet_ptr)
		pfsync_detach_ifnet_ptr(ifp);

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table state */
	pfi_kkif_update(kif);

	/* Drop the reference taken in pfi_attach_ifnet(). */
	if (kif->pfik_ifp)
		if_rele(kif->pfik_ifp);

	kif->pfik_ifp = NULL;
	ifp->if_pf_kif = NULL;
#ifdef ALTQ
	pf_altq_ifnet_event(ifp, 1);
#endif
	pfi_kkif_remove_if_unref(kif);

	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1107
/*
 * group_attach_event handler: create and attach a kif for a newly
 * created interface group.
 */
static void
pfi_attach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	/* M_WAITOK: event handlers may sleep here, so no NULL check needed. */
	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifgroup(ifg, kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1125
/*
 * group_change_event handler: (re-)attach the group's kif by name and
 * refresh dependent dynamic addresses after a membership change.
 */
static void
pfi_change_group_event(void *arg __unused, char *gname)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table state */
	/* Attach returns the existing kif if one is registered already. */
	kif = pfi_kkif_attach(kif, gname);
	pfi_kkif_update(kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1146
/*
 * group_detach_event handler: unlink the group from its kif and queue
 * the kif for garbage collection if nothing else references it.
 */
static void
pfi_detach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct pfi_kkif *kif = (struct pfi_kkif *)ifg->ifg_pf_kif;

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table state */

	kif->pfik_group = NULL;
	ifg->ifg_pf_kif = NULL;

	pfi_kkif_remove_if_unref(kif);
	PF_RULES_WUNLOCK();
}
1168
/*
 * ifaddr_event handler: an interface's address list changed, so refresh
 * the dynamic addresses and tables that depend on it.
 */
static void
pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	KASSERT(ifp, ("ifp == NULL"));

	if (ifp->if_pf_kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	/* Re-check under the lock: the kif may have been detached meanwhile. */
	if (ifp->if_pf_kif) {
		struct epoch_tracker et;

		V_pfi_update++;	/* invalidate cached dynaddr table state */
		NET_EPOCH_ENTER(et);
		pfi_kkif_update(ifp->if_pf_kif);
		NET_EPOCH_EXIT(et);
	}
	PF_RULES_WUNLOCK();
}
1193