xref: /freebsd/sys/netpfil/pf/pf_if.c (revision b51f459a2098622c31ed54f5c1bf0e03efce403b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2003 Cedric Berger
6  * Copyright (c) 2005 Henning Brauer <henning@openbsd.org>
7  * Copyright (c) 2005 Ryan McBride <mcbride@openbsd.org>
8  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *    - Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *    - Redistributions in binary form must reproduce the above
18  *      copyright notice, this list of conditions and the following
19  *      disclaimer in the documentation and/or other materials provided
20  *      with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  *	$OpenBSD: pf_if.c,v 1.54 2008/06/14 16:55:28 mk Exp $
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 
44 #include <sys/param.h>
45 #include <sys/kernel.h>
46 #include <sys/eventhandler.h>
47 #include <sys/lock.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 
51 #include <net/if.h>
52 #include <net/if_var.h>
53 #include <net/vnet.h>
54 #include <net/pfvar.h>
55 #include <net/route.h>
56 
57 VNET_DEFINE(struct pfi_kkif *,	 pfi_all);
58 VNET_DEFINE_STATIC(long, pfi_update);
59 #define	V_pfi_update	VNET(pfi_update)
60 #define PFI_BUFFER_MAX	0x10000
61 
62 VNET_DECLARE(int, pf_vnet_active);
63 #define V_pf_vnet_active	VNET(pf_vnet_active)
64 
65 VNET_DEFINE_STATIC(struct pfr_addr *, pfi_buffer);
66 VNET_DEFINE_STATIC(int, pfi_buffer_cnt);
67 VNET_DEFINE_STATIC(int,	pfi_buffer_max);
68 #define	V_pfi_buffer		 VNET(pfi_buffer)
69 #define	V_pfi_buffer_cnt	 VNET(pfi_buffer_cnt)
70 #define	V_pfi_buffer_max	 VNET(pfi_buffer_max)
71 
72 eventhandler_tag	 pfi_attach_cookie;
73 eventhandler_tag	 pfi_detach_cookie;
74 eventhandler_tag	 pfi_attach_group_cookie;
75 eventhandler_tag	 pfi_change_group_cookie;
76 eventhandler_tag	 pfi_detach_group_cookie;
77 eventhandler_tag	 pfi_ifaddr_event_cookie;
78 
79 static void	 pfi_attach_ifnet(struct ifnet *, struct pfi_kkif *);
80 static void	 pfi_attach_ifgroup(struct ifg_group *, struct pfi_kkif *);
81 
82 static void	 pfi_kkif_update(struct pfi_kkif *);
83 static void	 pfi_dynaddr_update(struct pfi_dynaddr *dyn);
84 static void	 pfi_table_update(struct pfr_ktable *, struct pfi_kkif *, int,
85 		    int);
86 static void	 pfi_instance_add(struct ifnet *, int, int);
87 static void	 pfi_address_add(struct sockaddr *, int, int);
88 static int	 pfi_kkif_compare(struct pfi_kkif *, struct pfi_kkif *);
89 static int	 pfi_skip_if(const char *, struct pfi_kkif *);
90 static int	 pfi_unmask(void *);
91 static void	 pfi_attach_ifnet_event(void * __unused, struct ifnet *);
92 static void	 pfi_detach_ifnet_event(void * __unused, struct ifnet *);
93 static void	 pfi_attach_group_event(void * __unused, struct ifg_group *);
94 static void	 pfi_change_group_event(void * __unused, char *);
95 static void	 pfi_detach_group_event(void * __unused, struct ifg_group *);
96 static void	 pfi_ifaddr_event(void * __unused, struct ifnet *);
97 
98 RB_HEAD(pfi_ifhead, pfi_kkif);
99 static RB_PROTOTYPE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
100 static RB_GENERATE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
101 VNET_DEFINE_STATIC(struct pfi_ifhead, pfi_ifs);
102 #define	V_pfi_ifs	VNET(pfi_ifs)
103 
104 #define	PFI_BUFFER_MAX		0x10000
105 MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");
106 
107 LIST_HEAD(pfi_list, pfi_kkif);
108 VNET_DEFINE_STATIC(struct pfi_list, pfi_unlinked_kifs);
109 #define	V_pfi_unlinked_kifs	VNET(pfi_unlinked_kifs)
110 static struct mtx pfi_unlnkdkifs_mtx;
111 MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
112     MTX_DEF);
113 
/*
 * Per-vnet pf interface layer setup: allocate the dynaddr address
 * snapshot buffer, then create and attach one kif for "all"
 * (V_pfi_all), one per existing interface group and one per existing
 * ifnet in this vnet.
 */
void
pfi_initialize_vnet(void)
{
	struct pfi_list kifs = LIST_HEAD_INITIALIZER();
	struct epoch_tracker et;
	struct pfi_kkif *kif;
	struct ifg_group *ifg;
	struct ifnet *ifp;
	int nkifs;

	/*
	 * Initial snapshot buffer; pfi_address_add() doubles it on
	 * demand up to PFI_BUFFER_MAX entries.
	 */
	V_pfi_buffer_max = 64;
	V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	nkifs = 1;	/* one for V_pfi_all */
	IFNET_RLOCK();
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		nkifs++;
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		nkifs++;

	/* Pre-allocate with M_WAITOK before taking the rules lock. */
	for (int n = 0; n < nkifs; n++) {
		kif = pf_kkif_create(M_WAITOK);
		LIST_INSERT_HEAD(&kifs, kif, pfik_list);
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	kif = LIST_FIRST(&kifs);
	LIST_REMOVE(kif, pfik_list);
	V_pfi_all = pfi_kkif_attach(kif, IFG_ALL);
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifgroup(ifg, kif);
	}
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifnet(ifp, kif);
	}
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();

	/* Counts were taken under IFNET_RLOCK, so every kif was consumed. */
	MPASS(LIST_EMPTY(&kifs));
}
161 
/*
 * Global (once per kernel, not per vnet) setup: hook pf into the
 * ifnet/group lifecycle and interface-address-change event handlers.
 */
void
pfi_initialize(void)
{

	pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
	    pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
	    pfi_attach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
	    pfi_change_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
	    pfi_detach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
	    pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
179 
180 void
181 pfi_cleanup_vnet(void)
182 {
183 	struct pfi_kkif *kif;
184 
185 	PF_RULES_WASSERT();
186 
187 	V_pfi_all = NULL;
188 	while ((kif = RB_MIN(pfi_ifhead, &V_pfi_ifs))) {
189 		RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);
190 		if (kif->pfik_group)
191 			kif->pfik_group->ifg_pf_kif = NULL;
192 		if (kif->pfik_ifp) {
193 			if_rele(kif->pfik_ifp);
194 			kif->pfik_ifp->if_pf_kif = NULL;
195 		}
196 		pf_kkif_free(kif);
197 	}
198 
199 	mtx_lock(&pfi_unlnkdkifs_mtx);
200 	while ((kif = LIST_FIRST(&V_pfi_unlinked_kifs))) {
201 		LIST_REMOVE(kif, pfik_list);
202 		pf_kkif_free(kif);
203 	}
204 	mtx_unlock(&pfi_unlnkdkifs_mtx);
205 
206 	free(V_pfi_buffer, PFI_MTYPE);
207 }
208 
/*
 * Global teardown: unhook all event handlers registered in
 * pfi_initialize().
 */
void
pfi_cleanup(void)
{

	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
	EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
	EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
	EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
	EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
}
220 
221 struct pfi_kkif*
222 pf_kkif_create(int flags)
223 {
224 	struct pfi_kkif *kif;
225 
226 	kif = malloc(sizeof(*kif), PFI_MTYPE, flags | M_ZERO);
227 	if (! kif)
228 		return (kif);
229 
230 	for (int i = 0; i < 2; i++) {
231 		for (int j = 0; j < 2; j++) {
232 			for (int k = 0; k < 2; k++) {
233 				kif->pfik_packets[i][j][k] =
234 				    counter_u64_alloc(flags);
235 				kif->pfik_bytes[i][j][k] =
236 				    counter_u64_alloc(flags);
237 
238 				if (! kif->pfik_packets[i][j][k] ||
239 				    ! kif->pfik_bytes[i][j][k]) {
240 					pf_kkif_free(kif);
241 					return (NULL);
242 				}
243 			}
244 		}
245 	}
246 
247 	return (kif);
248 }
249 
250 void
251 pf_kkif_free(struct pfi_kkif *kif)
252 {
253 	if (! kif)
254 		return;
255 
256 	for (int i = 0; i < 2; i++) {
257 		for (int j = 0; j < 2; j++) {
258 			for (int k = 0; k < 2; k++) {
259 				counter_u64_free(kif->pfik_packets[i][j][k]);
260 				counter_u64_free(kif->pfik_bytes[i][j][k]);
261 			}
262 		}
263 	}
264 
265 	free(kif, PFI_MTYPE);
266 }
267 
268 void
269 pf_kkif_zero(struct pfi_kkif *kif)
270 {
271 
272 	for (int i = 0; i < 2; i++) {
273 		for (int j = 0; j < 2; j++) {
274 			for (int k = 0; k < 2; k++) {
275 				counter_u64_zero(kif->pfik_packets[i][j][k]);
276 				counter_u64_zero(kif->pfik_bytes[i][j][k]);
277 			}
278 		}
279 	}
280 	kif->pfik_tzero = time_second;
281 }
282 
283 struct pfi_kkif *
284 pfi_kkif_find(const char *kif_name)
285 {
286 	struct pfi_kif_cmp s;
287 
288 	PF_RULES_ASSERT();
289 
290 	bzero(&s, sizeof(s));
291 	strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));
292 
293 	return (RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&s));
294 }
295 
/*
 * Link a pre-allocated kif into the tree under the given name.  If a
 * kif with that name already exists, the supplied one is freed and the
 * existing one returned instead — callers must always use the return
 * value, never the argument.
 */
struct pfi_kkif *
pfi_kkif_attach(struct pfi_kkif *kif, const char *kif_name)
{
	struct pfi_kkif *kif1;

	PF_RULES_WASSERT();
	KASSERT(kif != NULL, ("%s: null kif", __func__));

	kif1 = pfi_kkif_find(kif_name);
	if (kif1 != NULL) {
		/* Name already known: drop the caller's spare kif. */
		pf_kkif_free(kif);
		return (kif1);
	}

	pf_kkif_zero(kif);
	strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
	/*
	 * It seems that the value of time_second is in uninitialized state
	 * when pf sets interface statistics clear time in boot phase if pf
	 * was statically linked to kernel. Instead of setting the bogus
	 * time value have pfi_get_ifaces handle this case. In
	 * pfi_get_ifaces it uses time_second if it sees the time is 0.
	 */
	kif->pfik_tzero = time_second > 1 ? time_second : 0;
	TAILQ_INIT(&kif->pfik_dynaddrs);

	RB_INSERT(pfi_ifhead, &V_pfi_ifs, kif);

	return (kif);
}
326 
/*
 * Account one rule reference on a kif.  Rules write lock must be held;
 * the count keeps the kif alive while rules point at it.
 */
void
pfi_kkif_ref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	kif->pfik_rulerefs++;
}
334 
/*
 * Drop one rule reference.  When the last reference goes away and the
 * kif no longer backs an ifnet, a group, V_pfi_all, or any flags, it
 * is unlinked from the tree and parked on the unlinked list, where
 * pfi_kkif_purge() garbage-collects it later.
 */
void
pfi_kkif_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	KASSERT(kif->pfik_rulerefs > 0, ("%s: %p has zero refs", __func__, kif));

	kif->pfik_rulerefs--;

	if (kif->pfik_rulerefs > 0)
		return;

	/* kif referencing an existing ifnet or group or holding flags should
	 * exist. */
	if (kif->pfik_ifp != NULL || kif->pfik_group != NULL ||
	    kif == V_pfi_all || kif->pfik_flags != 0)
		return;

	RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);

	/*
	 * Raise the mark so the purge sweep gives lingering users one
	 * cycle before the kif is actually freed.
	 */
	kif->pfik_flags |= PFI_IFLAG_REFS;

	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_INSERT_HEAD(&V_pfi_unlinked_kifs, kif, pfik_list);
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}
361 
/*
 * Garbage-collect unlinked kifs using two-pass mark-and-sweep: a kif
 * whose PFI_IFLAG_REFS mark was re-raised since the previous sweep
 * survives one more round; otherwise it is freed.
 */
void
pfi_kkif_purge(void)
{
	struct pfi_kkif *kif, *kif1;

	/*
	 * Do naive mark-and-sweep garbage collecting of old kifs.
	 * Reference flag is raised by pf_purge_expired_states().
	 */
	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_FOREACH_SAFE(kif, &V_pfi_unlinked_kifs, pfik_list, kif1) {
		if (!(kif->pfik_flags & PFI_IFLAG_REFS)) {
			LIST_REMOVE(kif, pfik_list);
			pf_kkif_free(kif);
		} else
			kif->pfik_flags &= ~PFI_IFLAG_REFS;
	}
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}
381 
/*
 * Does the packet's interface match the rule's interface?  A NULL rule
 * kif (rule matches "any") or pointer equality succeeds immediately.
 * If the rule names a group, succeed when the packet's interface is a
 * member of that group.  Returns 1 on match, 0 otherwise.
 */
int
pfi_kkif_match(struct pfi_kkif *rule_kif, struct pfi_kkif *packet_kif)
{
	struct ifg_list	*p;

	NET_EPOCH_ASSERT();

	if (rule_kif == NULL || rule_kif == packet_kif)
		return (1);

	if (rule_kif->pfik_group != NULL) {
		/* Walk the packet interface's group memberships. */
		CK_STAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
			if (p->ifgl_group == rule_kif->pfik_group)
				return (1);
	}

	return (0);
}
400 
/*
 * Bind an ifnet to a (pre-allocated) kif: attach the kif under the
 * interface name, take an ifnet reference, cross-link the two and
 * refresh dependent dynaddr tables.
 */
static void
pfi_attach_ifnet(struct ifnet *ifp, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;	/* invalidate cached dynaddr table generations */
	kif = pfi_kkif_attach(kif, ifp->if_xname);
	if_ref(ifp);
	kif->pfik_ifp = ifp;
	ifp->if_pf_kif = kif;
	pfi_kkif_update(kif);
}
414 
/*
 * Bind an interface group to a (pre-allocated) kif under the group
 * name and cross-link the two.
 */
static void
pfi_attach_ifgroup(struct ifg_group *ifg, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	V_pfi_update++;	/* invalidate cached dynaddr table generations */
	kif = pfi_kkif_attach(kif, ifg->ifg_group);
	kif->pfik_group = ifg;
	ifg->ifg_pf_kif = kif;
}
426 
/*
 * Check whether address 'a' of family 'af' matches the dynamic address
 * object 'dyn'.  With zero cached addresses nothing matches; with
 * exactly one, compare against the inline cached address/mask;
 * otherwise fall back to a lookup in the backing radix table.
 */
int
pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		switch (dyn->pfid_acnt4) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr4,
			    &dyn->pfid_mask4, a, AF_INET));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		switch (dyn->pfid_acnt6) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr6,
			    &dyn->pfid_mask6, a, AF_INET6));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
		}
		break;
#endif /* INET6 */
	default:
		/* Unsupported family: no match. */
		return (0);
	}
}
461 
/*
 * Initialize a dynamic interface address (e.g. "(em0:network)"):
 * create or find the kif for the interface name, derive the hidden
 * backing table name from the modifier flags, attach that table in the
 * reserved anchor and populate it from the current interface
 * addresses.  Returns 0 on success or ENOMEM.
 */
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct epoch_tracker	 et;
	struct pfi_dynaddr	*dyn;
	char			 tblname[PF_TABLE_NAME_SIZE];
	struct pf_kruleset	*ruleset = NULL;
	struct pfi_kkif		*kif;
	int			 rv = 0;

	PF_RULES_WASSERT();
	KASSERT(aw->type == PF_ADDR_DYNIFTL, ("%s: type %u",
	    __func__, aw->type));
	KASSERT(aw->p.dyn == NULL, ("%s: dyn is %p", __func__, aw->p.dyn));

	if ((dyn = malloc(sizeof(*dyn), PFI_MTYPE, M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	if ((kif = pf_kkif_create(M_NOWAIT)) == NULL) {
		free(dyn, PFI_MTYPE);
		return (ENOMEM);
	}

	/* "self" is an alias for the special "all interfaces" group. */
	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kkif_attach(kif, IFG_ALL);
	else
		dyn->pfid_kif = pfi_kkif_attach(kif, aw->v.ifname);
	kif = NULL;	/* ownership transferred (or freed) by attach */
	pfi_kkif_ref(dyn->pfid_kif);

	/* 128 means "host address, no /prefix suffix" below. */
	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	/* Encode the modifier flags into the table name. */
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_kruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	/* Populate the table from the current interface addresses. */
	NET_EPOCH_ENTER(et);
	pfi_kkif_update(dyn->pfid_kif);
	NET_EPOCH_EXIT(et);

	return (0);

_bad:
	/* Unwind in reverse order of acquisition. */
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_kruleset(ruleset);
	if (dyn->pfid_kif != NULL)
		pfi_kkif_unref(dyn->pfid_kif);
	free(dyn, PFI_MTYPE);

	return (rv);
}
540 
/*
 * Re-derive state that depends on this kif: refresh all dynaddrs
 * hanging off it, propagate group flags onto member interfaces, and
 * recurse into every group the interface is a member of so that
 * group-based dynaddr tables stay in sync.
 */
static void
pfi_kkif_update(struct pfi_kkif *kif)
{
	struct ifg_list		*ifgl;
	struct ifg_member	*ifgm;
	struct pfi_dynaddr	*p;
	struct pfi_kkif		*tmpkif;

	NET_EPOCH_ASSERT();
	PF_RULES_WASSERT();

	/* update all dynaddr */
	TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
		pfi_dynaddr_update(p);

	/* Apply group flags to new members. */
	if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members,
		    ifgm_next) {
			tmpkif = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;
			if (tmpkif == NULL)
				continue;

			tmpkif->pfik_flags |= kif->pfik_flags;
		}
	}

	/* again for all groups kif is member of */
	if (kif->pfik_ifp != NULL) {
		CK_STAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
			pfi_kkif_update((struct pfi_kkif *)
			    ifgl->ifgl_group->ifg_pf_kif);
	}
}
575 
/*
 * Refresh one dynaddr: rebuild its backing table if its cached
 * generation (pfrkt_larg) is stale against V_pfi_update, then let the
 * table layer recompute the dynaddr's inline address cache.
 */
static void
pfi_dynaddr_update(struct pfi_dynaddr *dyn)
{
	struct pfi_kkif		*kif;
	struct pfr_ktable	*kt;

	PF_RULES_WASSERT();
	KASSERT(dyn && dyn->pfid_kif && dyn->pfid_kt,
	    ("%s: bad argument", __func__));

	kif = dyn->pfid_kif;
	kt = dyn->pfid_kt;

	if (kt->pfrkt_larg != V_pfi_update) {
		/* this table needs to be brought up-to-date */
		pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
		kt->pfrkt_larg = V_pfi_update;
	}
	pfr_dynaddr_update(kt, dyn);
}
596 
/*
 * Rebuild the address list for a dynaddr backing table: collect the
 * addresses of the interface (or of every group member) into the
 * shared V_pfi_buffer, then replace the table contents with one
 * pfr_set_addrs() call.
 */
static void
pfi_table_update(struct pfr_ktable *kt, struct pfi_kkif *kif, int net, int flags)
{
	int			 e, size2 = 0;
	struct ifg_member	*ifgm;

	NET_EPOCH_ASSERT();

	V_pfi_buffer_cnt = 0;	/* restart snapshot from scratch */

	if (kif->pfik_ifp != NULL)
		pfi_instance_add(kif->pfik_ifp, net, flags);
	else if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
			pfi_instance_add(ifgm->ifgm_ifp, net, flags);
	}

	if ((e = pfr_set_addrs(&kt->pfrkt_t, V_pfi_buffer, V_pfi_buffer_cnt, &size2,
	    NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
		printf("%s: cannot set %d new addresses into table %s: %d\n",
		    __func__, V_pfi_buffer_cnt, kt->pfrkt_name, e);
}
619 
/*
 * Walk the address list of one ifnet and append every address that
 * passes the PFI_AFLAG_* modifier filters to the snapshot buffer.
 * 'net' of 128 means "use the address's own mask" when
 * PFI_AFLAG_NETWORK is set; otherwise addresses are added as hosts.
 */
static void
pfi_instance_add(struct ifnet *ifp, int net, int flags)
{
	struct ifaddr	*ia;
	int		 got4 = 0, got6 = 0;
	int		 net2, af;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr == NULL)
			continue;
		af = ia->ifa_addr->sa_family;
		if (af != AF_INET && af != AF_INET6)
			continue;
		/*
		 * XXX: For point-to-point interfaces, (ifname:0) and IPv4,
		 *      jump over addresses without a proper route to work
		 *      around a problem with ppp not fully removing the
		 *      address used during IPCP.
		 */
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
		    !(ia->ifa_flags & IFA_ROUTE) &&
		    (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
			continue;
		/* :broadcast has no meaning for IPv6 ... */
		if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
			continue;
		/* ... nor for interfaces without broadcast capability. */
		if ((flags & PFI_AFLAG_BROADCAST) &&
		    !(ifp->if_flags & IFF_BROADCAST))
			continue;
		if ((flags & PFI_AFLAG_PEER) &&
		    !(ifp->if_flags & IFF_POINTOPOINT))
			continue;
		/* Skip IPv6 link-local for :network and :0 modifiers. */
		if ((flags & (PFI_AFLAG_NETWORK | PFI_AFLAG_NOALIAS)) &&
		    af == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(
		    &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
			continue;
		if (flags & PFI_AFLAG_NOALIAS) {
			/* :0 takes only the first address per family. */
			if (af == AF_INET && got4)
				continue;
			if (af == AF_INET6 && got6)
				continue;
		}
		if (af == AF_INET)
			got4 = 1;
		else if (af == AF_INET6)
			got6 = 1;
		net2 = net;
		if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
			/* Derive the prefix from the address's own mask. */
			if (af == AF_INET)
				net2 = pfi_unmask(&((struct sockaddr_in *)
				    ia->ifa_netmask)->sin_addr);
			else if (af == AF_INET6)
				net2 = pfi_unmask(&((struct sockaddr_in6 *)
				    ia->ifa_netmask)->sin6_addr);
		}
		if (af == AF_INET && net2 > 32)
			net2 = 32;
		if (flags & PFI_AFLAG_BROADCAST)
			pfi_address_add(ia->ifa_broadaddr, af, net2);
		else if (flags & PFI_AFLAG_PEER)
			pfi_address_add(ia->ifa_dstaddr, af, net2);
		else
			pfi_address_add(ia->ifa_addr, af, net2);
	}
}
687 
/*
 * Append one address to the snapshot buffer, growing the buffer by
 * doubling (capped at PFI_BUFFER_MAX entries) as needed.  On overflow
 * or allocation failure the address is silently dropped after a
 * console warning.
 */
static void
pfi_address_add(struct sockaddr *sa, int af, int net)
{
	struct pfr_addr	*p;
	int		 i;

	if (V_pfi_buffer_cnt >= V_pfi_buffer_max) {
		int		 new_max = V_pfi_buffer_max * 2;

		if (new_max > PFI_BUFFER_MAX) {
			printf("%s: address buffer full (%d/%d)\n", __func__,
			    V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		p = malloc(new_max * sizeof(*V_pfi_buffer), PFI_MTYPE,
		    M_NOWAIT);
		if (p == NULL) {
			printf("%s: no memory to grow buffer (%d/%d)\n",
			    __func__, V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		memcpy(p, V_pfi_buffer, V_pfi_buffer_max * sizeof(*V_pfi_buffer));
		/* no need to zero buffer */
		free(V_pfi_buffer, PFI_MTYPE);
		V_pfi_buffer = p;
		V_pfi_buffer_max = new_max;
	}
	/* 128 is the "full host address" sentinel, even for IPv4. */
	if (af == AF_INET && net > 32)
		net = 128;
	p = V_pfi_buffer + V_pfi_buffer_cnt++;
	bzero(p, sizeof(*p));
	p->pfra_af = af;
	p->pfra_net = net;
	if (af == AF_INET)
		p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
	else if (af == AF_INET6) {
		p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		/* Strip the embedded scope id from link-local addresses. */
		if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
			p->pfra_ip6addr.s6_addr16[1] = 0;
	}
	/* mask network address bits */
	/* NOTE(review): the (caddr_t)p byte indexing assumes the address
	 * union is the first member of struct pfr_addr — confirm against
	 * pfvar.h if the struct layout ever changes. */
	if (net < 128)
		((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
	for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
		((caddr_t)p)[i] = 0;
}
734 
/*
 * Tear down a dynaddr: unlink it from its kif, drop the kif rule
 * reference and the backing table reference, then free it.
 */
void
pfi_dynaddr_remove(struct pfi_dynaddr *dyn)
{

	KASSERT(dyn->pfid_kif != NULL, ("%s: null pfid_kif", __func__));
	KASSERT(dyn->pfid_kt != NULL, ("%s: null pfid_kt", __func__));

	TAILQ_REMOVE(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	pfi_kkif_unref(dyn->pfid_kif);
	pfr_detach_table(dyn->pfid_kt);
	free(dyn, PFI_MTYPE);
}
747 
748 void
749 pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
750 {
751 
752 	KASSERT(aw->type == PF_ADDR_DYNIFTL,
753 	    ("%s: type %u", __func__, aw->type));
754 
755 	if (aw->p.dyn == NULL || aw->p.dyn->pfid_kif == NULL)
756 		return;
757 	aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
758 }
759 
760 static int
761 pfi_kkif_compare(struct pfi_kkif *p, struct pfi_kkif *q)
762 {
763 	return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
764 }
765 
/*
 * Aggregate (or, when pfs == NULL, clear) per-interface counters for
 * 'name'.  If the name is a group, every member interface contributes;
 * otherwise a one-entry temporary member list is built so both cases
 * use the same loop.  Note bcounters are summed over the k (in/out)
 * dimension into a 2x2 array.
 */
void
pfi_update_status(const char *name, struct pf_status *pfs)
{
	struct pfi_kkif		*p;
	struct pfi_kif_cmp	 key;
	struct ifg_member	 p_member, *ifgm;
	CK_STAILQ_HEAD(, ifg_member) ifg_members;
	int			 i, j, k;

	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
	p = RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&key);
	if (p == NULL)
		return;

	if (p->pfik_group != NULL) {
		bcopy(&p->pfik_group->ifg_members, &ifg_members,
		    sizeof(ifg_members));
	} else {
		/* build a temporary list for p only */
		bzero(&p_member, sizeof(p_member));
		p_member.ifgm_ifp = p->pfik_ifp;
		CK_STAILQ_INIT(&ifg_members);
		CK_STAILQ_INSERT_TAIL(&ifg_members, &p_member, ifgm_next);
	}
	if (pfs) {
		/* Start from zero; members are accumulated below. */
		bzero(pfs->pcounters, sizeof(pfs->pcounters));
		bzero(pfs->bcounters, sizeof(pfs->bcounters));
	}
	CK_STAILQ_FOREACH(ifgm, &ifg_members, ifgm_next) {
		if (ifgm->ifgm_ifp == NULL || ifgm->ifgm_ifp->if_pf_kif == NULL)
			continue;
		p = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;

		/* just clear statistics */
		if (pfs == NULL) {
			pf_kkif_zero(p);
			continue;
		}
		for (i = 0; i < 2; i++)
			for (j = 0; j < 2; j++)
				for (k = 0; k < 2; k++) {
					pfs->pcounters[i][j][k] +=
					    counter_u64_fetch(p->pfik_packets[i][j][k]);
					pfs->bcounters[i][j] +=
					    counter_u64_fetch(p->pfik_bytes[i][j][k]);
				}
	}
}
814 
815 static void
816 pf_kkif_to_kif(const struct pfi_kkif *kkif, struct pfi_kif *kif)
817 {
818 
819 	bzero(kif, sizeof(*kif));
820 	strlcpy(kif->pfik_name, kkif->pfik_name, sizeof(kif->pfik_name));
821 	for (int i = 0; i < 2; i++) {
822 		for (int j = 0; j < 2; j++) {
823 			for (int k = 0; k < 2; k++) {
824 				kif->pfik_packets[i][j][k] =
825 				    counter_u64_fetch(kkif->pfik_packets[i][j][k]);
826 				kif->pfik_bytes[i][j][k] =
827 				    counter_u64_fetch(kkif->pfik_bytes[i][j][k]);
828 			}
829 		}
830 	}
831 	kif->pfik_flags = kkif->pfik_flags;
832 	kif->pfik_tzero = kkif->pfik_tzero;
833 	kif->pfik_rulerefs = kkif->pfik_rulerefs;
834 }
835 
836 void
837 pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
838 {
839 	struct epoch_tracker et;
840 	struct pfi_kkif	*p, *nextp;
841 	int		 n = 0;
842 
843 	NET_EPOCH_ENTER(et);
844 	for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
845 		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
846 		if (pfi_skip_if(name, p))
847 			continue;
848 		if (*size <= n++)
849 			break;
850 		if (!p->pfik_tzero)
851 			p->pfik_tzero = time_second;
852 		pf_kkif_to_kif(p, buf++);
853 		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
854 	}
855 	*size = n;
856 	NET_EPOCH_EXIT(et);
857 }
858 
/*
 * Decide whether kif 'p' is excluded when filtering by 'filter'.
 * Returns 0 (keep) on an empty filter, an exact name match, or when
 * the kif's interface is a member of a group named 'filter'; returns 1
 * (skip) otherwise.  Group names may not end in a digit, so a filter
 * ending in a digit can only ever match exactly.
 */
static int
pfi_skip_if(const char *filter, struct pfi_kkif *p)
{
	struct ifg_list *i;
	int	n;

	NET_EPOCH_ASSERT();

	if (filter == NULL || !*filter)
		return (0);
	if (!strcmp(p->pfik_name, filter))
		return (0);	/* exact match */
	n = strlen(filter);
	if (n < 1 || n >= IFNAMSIZ)
		return (1);	/* sanity check */
	if (filter[n-1] >= '0' && filter[n-1] <= '9')
		return (1);	/* group names may not end in a digit */
	if (p->pfik_ifp == NULL)
		return (1);
	CK_STAILQ_FOREACH(i, &p->pfik_ifp->if_groups, ifgl_next)
		if (!strncmp(i->ifgl_group->ifg_group, filter, IFNAMSIZ))
			return (0); /* iface is in group "filter" */
	return (1);
}
883 
/*
 * Set user flags (e.g. PFI_IFLAG_SKIP) on every kif matched by 'name'.
 * A kif is attached for 'name' first so the flags persist even when no
 * such interface currently exists.  pfi_kkif_attach() asserts the
 * rules write lock, so callers must hold it.  Returns 0 or ENOMEM.
 */
int
pfi_set_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif	*p, *kif;

	kif = pf_kkif_create(M_NOWAIT);
	if (kif == NULL)
		return (ENOMEM);

	NET_EPOCH_ENTER(et);

	/* May free our kif and return a pre-existing one. */
	kif = pfi_kkif_attach(kif, name);

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags |= flags;
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
906 
/*
 * Clear user flags on every kif matched by 'name'.  A kif that ends up
 * with no ifnet, no group, no flags and no rule references is removed
 * from the tree and freed on the spot (hence the _SAFE iterator).
 * Always returns 0.
 */
int
pfi_clear_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *tmp;

	NET_EPOCH_ENTER(et);
	RB_FOREACH_SAFE(p, pfi_ifhead, &V_pfi_ifs, tmp) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags &= ~flags;

		if (p->pfik_ifp == NULL && p->pfik_group == NULL &&
		    p->pfik_flags == 0 && p->pfik_rulerefs == 0) {
			/* Delete this kif. */
			RB_REMOVE(pfi_ifhead, &V_pfi_ifs, p);
			pf_kkif_free(p);
		}
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
929 
/* from pf_print_state.c */
/*
 * Convert a contiguous network mask stored in a struct pf_addr (IPv4
 * or IPv6) into its prefix length in bits.
 */
static int
pfi_unmask(void *addr)
{
	struct pf_addr *m = addr;
	int i = 31, j = 0, b = 0;
	u_int32_t tmp;

	/* Count fully set 32-bit words. */
	while (j < 4 && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	/* Count the leading one bits of the first partial word. */
	if (j < 4) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}
949 
950 static void
951 pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
952 {
953 	struct epoch_tracker et;
954 	struct pfi_kkif *kif;
955 
956 	if (V_pf_vnet_active == 0) {
957 		/* Avoid teardown race in the least expensive way. */
958 		return;
959 	}
960 	kif = pf_kkif_create(M_NOWAIT);
961 	NET_EPOCH_ENTER(et);
962 	PF_RULES_WLOCK();
963 	pfi_attach_ifnet(ifp, kif);
964 #ifdef ALTQ
965 	pf_altq_ifnet_event(ifp, 0);
966 #endif
967 	PF_RULES_WUNLOCK();
968 	NET_EPOCH_EXIT(et);
969 }
970 
/*
 * ifnet departure event handler: notify pfsync, unlink the kif from
 * the ifnet and drop the reference taken in pfi_attach_ifnet().  The
 * kif itself stays in the tree while rules still reference it.
 */
static void
pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif = (struct pfi_kkif *)ifp->if_pf_kif;

	if (pfsync_detach_ifnet_ptr)
		pfsync_detach_ifnet_ptr(ifp);

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table generations */
	pfi_kkif_update(kif);

	if (kif->pfik_ifp)
		if_rele(kif->pfik_ifp);

	kif->pfik_ifp = NULL;
	ifp->if_pf_kif = NULL;
#ifdef ALTQ
	pf_altq_ifnet_event(ifp, 1);
#endif
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1004 
/*
 * Group creation event handler: create a kif for the new interface
 * group and attach it under the pf locks.
 */
static void
pfi_attach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	/* M_WAITOK: allocation cannot fail here. */
	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifgroup(ifg, kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1022 
/*
 * Group membership change event handler: make sure a kif exists for
 * the group and refresh all dynaddr state that depends on it.
 */
static void
pfi_change_group_event(void *arg __unused, char *gname)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table generations */
	/* May free our kif and return a pre-existing one. */
	kif = pfi_kkif_attach(kif, gname);
	pfi_kkif_update(kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1043 
/*
 * Group destruction event handler: sever the kif <-> group links.  The
 * kif itself stays in the tree while rules still reference it.
 */
static void
pfi_detach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct pfi_kkif *kif = (struct pfi_kkif *)ifg->ifg_pf_kif;

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	V_pfi_update++;	/* invalidate cached dynaddr table generations */

	kif->pfik_group = NULL;
	ifg->ifg_pf_kif = NULL;
	PF_RULES_WUNLOCK();
}
1063 
/*
 * Interface address change event handler: bump the update generation
 * and re-derive all dependent dynaddr tables.  if_pf_kif is tested
 * again under the rules lock because it may have been cleared between
 * the unlocked pre-check and lock acquisition.
 */
static void
pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	KASSERT(ifp, ("ifp == NULL"));

	if (ifp->if_pf_kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	if (ifp->if_pf_kif) {
		struct epoch_tracker et;

		V_pfi_update++;
		NET_EPOCH_ENTER(et);
		pfi_kkif_update(ifp->if_pf_kif);
		NET_EPOCH_EXIT(et);
	}
	PF_RULES_WUNLOCK();
}
1088