/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2003 Cedric Berger
 * Copyright (c) 2005 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2005 Ryan McBride <mcbride@openbsd.org>
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_if.c,v 1.54 2008/06/14 16:55:28 mk Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/route.h>

VNET_DEFINE(struct pfi_kif *,	 pfi_all);
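/*
 * V_pfi_update is a per-vnet generation counter.  It is bumped whenever
 * interface or group membership changes and compared against pfrkt_larg in
 * pfi_dynaddr_update(), so a dynamic-address table is only rebuilt when
 * something has actually changed since its last update.
 */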
static VNET_DEFINE(long, pfi_update);
#define	V_pfi_update	VNET(pfi_update)
#define PFI_BUFFER_MAX	0x10000

VNET_DECLARE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

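/*
 * V_pfi_buffer is per-vnet scratch space: pfi_table_update() collects the
 * candidate addresses for a dynamic-address table here (via
 * pfi_instance_add()/pfi_address_add()) before handing the whole array to
 * pfr_set_addrs().  It starts small and is doubled on demand up to
 * PFI_BUFFER_MAX entries.
 */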
static VNET_DEFINE(struct pfr_addr *, pfi_buffer);
static VNET_DEFINE(int, pfi_buffer_cnt);
static VNET_DEFINE(int,	pfi_buffer_max);
#define	V_pfi_buffer		 VNET(pfi_buffer)
#define	V_pfi_buffer_cnt	 VNET(pfi_buffer_cnt)
#define	V_pfi_buffer_max	 VNET(pfi_buffer_max)

eventhandler_tag	 pfi_attach_cookie;
eventhandler_tag	 pfi_detach_cookie;
eventhandler_tag	 pfi_attach_group_cookie;
eventhandler_tag	 pfi_change_group_cookie;
eventhandler_tag	 pfi_detach_group_cookie;
eventhandler_tag	 pfi_ifaddr_event_cookie;

static void	 pfi_attach_ifnet(struct ifnet *);
static void	 pfi_attach_ifgroup(struct ifg_group *);

static void	 pfi_kif_update(struct pfi_kif *);
static void	 pfi_dynaddr_update(struct pfi_dynaddr *dyn);
static void	 pfi_table_update(struct pfr_ktable *, struct pfi_kif *, int,
		    int);
static void	 pfi_instance_add(struct ifnet *, int, int);
static void	 pfi_address_add(struct sockaddr *, int, int);
static int	 pfi_if_compare(struct pfi_kif *, struct pfi_kif *);
static int	 pfi_skip_if(const char *, struct pfi_kif *);
static int	 pfi_unmask(void *);
static void	 pfi_attach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_detach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_attach_group_event(void *, struct ifg_group *);
static void	 pfi_change_group_event(void *, char *);
static void	 pfi_detach_group_event(void *, struct ifg_group *);
static void	 pfi_ifaddr_event(void * __unused, struct ifnet *);

RB_HEAD(pfi_ifhead, pfi_kif);
static RB_PROTOTYPE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
static RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
static VNET_DEFINE(struct pfi_ifhead, pfi_ifs);
#define	V_pfi_ifs	VNET(pfi_ifs)

MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");

LIST_HEAD(pfi_list, pfi_kif);
static VNET_DEFINE(struct pfi_list, pfi_unlinked_kifs);
#define	V_pfi_unlinked_kifs	VNET(pfi_unlinked_kifs)
static struct mtx pfi_unlnkdkifs_mtx;
MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
    MTX_DEF);

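/*
 * Per-vnet initialization: allocate the address scratch buffer, create the
 * special "all interfaces" kif (IFG_ALL) and register a kif for every
 * interface group and interface that already exists in this vnet.
 */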
void
pfi_initialize_vnet(void)
{
	struct ifg_group *ifg;
	struct ifnet *ifp;
	struct pfi_kif *kif;

	V_pfi_buffer_max = 64;
	V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
	PF_RULES_WLOCK();
	V_pfi_all = pfi_kif_attach(kif, IFG_ALL);
	PF_RULES_WUNLOCK();

	IFNET_RLOCK();
	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		pfi_attach_ifgroup(ifg);
	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
		pfi_attach_ifnet(ifp);
	IFNET_RUNLOCK();
}

void
pfi_initialize(void)
{

	pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
	    pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
	    pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
	pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
	    pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
	pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
	    pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
	pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
	    pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}

void
pfi_cleanup_vnet(void)
{
	struct pfi_kif *kif;

	PF_RULES_WASSERT();

	V_pfi_all = NULL;
	while ((kif = RB_MIN(pfi_ifhead, &V_pfi_ifs))) {
		RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);
		if (kif->pfik_group)
			kif->pfik_group->ifg_pf_kif = NULL;
		if (kif->pfik_ifp)
			kif->pfik_ifp->if_pf_kif = NULL;
		free(kif, PFI_MTYPE);
	}

	mtx_lock(&pfi_unlnkdkifs_mtx);
	while ((kif = LIST_FIRST(&V_pfi_unlinked_kifs))) {
		LIST_REMOVE(kif, pfik_list);
		free(kif, PFI_MTYPE);
	}
	mtx_unlock(&pfi_unlnkdkifs_mtx);

	free(V_pfi_buffer, PFI_MTYPE);
}

void
pfi_cleanup(void)
{

	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
	EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
	EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
	EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
	EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
}

struct pfi_kif *
pfi_kif_find(const char *kif_name)
{
	struct pfi_kif_cmp s;

	PF_RULES_ASSERT();

	bzero(&s, sizeof(s));
	strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));

	return (RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kif *)&s));
}

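/*
 * Find-or-create a kif by name.  The caller pre-allocates the kif with
 * M_WAITOK before taking the rules write lock; if a kif with this name
 * already exists, the spare allocation is freed and the existing kif is
 * returned instead.
 */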
struct pfi_kif *
pfi_kif_attach(struct pfi_kif *kif, const char *kif_name)
{
	struct pfi_kif *kif1;

	PF_RULES_WASSERT();
	KASSERT(kif != NULL, ("%s: null kif", __func__));

	kif1 = pfi_kif_find(kif_name);
	if (kif1 != NULL) {
		free(kif, PFI_MTYPE);
		return (kif1);
	}

	bzero(kif, sizeof(*kif));
	strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
	/*
	 * time_second may still be uninitialized when pf records the
	 * interface statistics clear time during boot, e.g. when pf is
	 * statically compiled into the kernel.  Rather than storing a
	 * bogus timestamp, leave it at 0 and let pfi_get_ifaces() handle
	 * the case: it substitutes the current time_second whenever it
	 * sees a zero clear time.
	 */
	kif->pfik_tzero = time_second > 1 ? time_second : 0;
	TAILQ_INIT(&kif->pfik_dynaddrs);

	RB_INSERT(pfi_ifhead, &V_pfi_ifs, kif);

	return (kif);
}

void
pfi_kif_ref(struct pfi_kif *kif)
{

	PF_RULES_WASSERT();
	kif->pfik_rulerefs++;
}

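/*
 * Drop a rule reference on a kif.  Once the last rule reference is gone and
 * the kif is no longer backed by a live ifnet or group (and is not the
 * special pfi_all kif), it is unlinked from the tree and parked on the
 * unlinked list, from where pfi_kif_purge() eventually frees it.
 */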
void
pfi_kif_unref(struct pfi_kif *kif)
{

	PF_RULES_WASSERT();
	KASSERT(kif->pfik_rulerefs > 0, ("%s: %p has zero refs", __func__, kif));

	kif->pfik_rulerefs--;

	if (kif->pfik_rulerefs > 0)
		return;

	/* A kif still backing an existing ifnet or group must stay around. */
	if (kif->pfik_ifp != NULL || kif->pfik_group != NULL || kif == V_pfi_all)
		return;

	RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);

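	/*
	 * Raise the reference flag before queueing the kif so that
	 * pfi_kif_purge() does not reclaim it on its very first pass; the
	 * flag is cleared there and is only re-raised (by
	 * pf_purge_expired_states()) while states still point at this kif.
	 */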
	kif->pfik_flags |= PFI_IFLAG_REFS;

	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_INSERT_HEAD(&V_pfi_unlinked_kifs, kif, pfik_list);
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}

void
pfi_kif_purge(void)
{
	struct pfi_kif *kif, *kif1;

	/*
	 * Do a naive mark-and-sweep garbage collection of old kifs.
	 * The reference flag is raised by pf_purge_expired_states().
	 */
	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_FOREACH_SAFE(kif, &V_pfi_unlinked_kifs, pfik_list, kif1) {
		if (!(kif->pfik_flags & PFI_IFLAG_REFS)) {
			LIST_REMOVE(kif, pfik_list);
			free(kif, PFI_MTYPE);
		} else
			kif->pfik_flags &= ~PFI_IFLAG_REFS;
	}
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}

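/*
 * Does a rule's interface specification match the interface a packet came
 * in/out on?  A rule without an interface matches everything; a rule bound
 * to a group matches every interface that is a member of that group.
 */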
int
pfi_kif_match(struct pfi_kif *rule_kif, struct pfi_kif *packet_kif)
{
	struct ifg_list	*p;

	if (rule_kif == NULL || rule_kif == packet_kif)
		return (1);

	if (rule_kif->pfik_group != NULL)
		/* XXXGL: locking? */
		TAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
			if (p->ifgl_group == rule_kif->pfik_group)
				return (1);

	return (0);
}

static void
pfi_attach_ifnet(struct ifnet *ifp)
{
	struct pfi_kif *kif;

	kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);

	PF_RULES_WLOCK();
	V_pfi_update++;
	kif = pfi_kif_attach(kif, ifp->if_xname);

	kif->pfik_ifp = ifp;
	ifp->if_pf_kif = kif;

	pfi_kif_update(kif);
	PF_RULES_WUNLOCK();
}

static void
pfi_attach_ifgroup(struct ifg_group *ifg)
{
	struct pfi_kif *kif;

	kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);

	PF_RULES_WLOCK();
	V_pfi_update++;
	kif = pfi_kif_attach(kif, ifg->ifg_group);

	kif->pfik_group = ifg;
	ifg->ifg_pf_kif = kif;
	PF_RULES_WUNLOCK();
}

int
pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		switch (dyn->pfid_acnt4) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr4,
			    &dyn->pfid_mask4, a, AF_INET));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		switch (dyn->pfid_acnt6) {
		case 0:
			return (0);
		case 1:
			return (PF_MATCHA(0, &dyn->pfid_addr6,
			    &dyn->pfid_mask6, a, AF_INET6));
		default:
			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
		}
		break;
#endif /* INET6 */
	default:
		return (0);
	}
}

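/*
 * Set up a dynamic interface address ("(ifname)", "(ifname:network)", ...
 * in a rule): allocate the pfi_dynaddr, attach and reference the kif, and
 * attach a hidden table in the reserved anchor that will hold the
 * interface's current addresses.  pfi_kif_update() then fills the table.
 */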
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct pfi_dynaddr	*dyn;
	char			 tblname[PF_TABLE_NAME_SIZE];
	struct pf_ruleset	*ruleset = NULL;
	struct pfi_kif		*kif;
	int			 rv = 0;

	PF_RULES_WASSERT();
	KASSERT(aw->type == PF_ADDR_DYNIFTL, ("%s: type %u",
	    __func__, aw->type));
	KASSERT(aw->p.dyn == NULL, ("%s: dyn is %p", __func__, aw->p.dyn));

	if ((dyn = malloc(sizeof(*dyn), PFI_MTYPE, M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	if ((kif = malloc(sizeof(*kif), PFI_MTYPE, M_NOWAIT)) == NULL) {
		free(dyn, PFI_MTYPE);
		return (ENOMEM);
	}

	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kif_attach(kif, IFG_ALL);
	else
		dyn->pfid_kif = pfi_kif_attach(kif, aw->v.ifname);
	pfi_kif_ref(dyn->pfid_kif);

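	/*
	 * Internally a host address is represented by a prefix length of
	 * 128 regardless of address family, so normalize an IPv4 /32 here;
	 * only non-host masks get a "/len" suffix on the table name below.
	 */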
	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
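	/*
	 * Compose the name of the backing table: the interface (or group)
	 * name, one suffix per modifier flag and, for non-host masks, a
	 * prefix length, e.g. "em0:network/24".
	 */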
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_ruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	pfi_kif_update(dyn->pfid_kif);

	return (0);

_bad:
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_ruleset(ruleset);
	if (dyn->pfid_kif != NULL)
		pfi_kif_unref(dyn->pfid_kif);
	free(dyn, PFI_MTYPE);

	return (rv);
}

static void
pfi_kif_update(struct pfi_kif *kif)
{
	struct ifg_list		*ifgl;
	struct pfi_dynaddr	*p;

	PF_RULES_WASSERT();

	/* Update all dynamic addresses on this kif. */
	TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
		pfi_dynaddr_update(p);

	/* Do the same for every group this interface is a member of. */
	if (kif->pfik_ifp != NULL) {
		IF_ADDR_RLOCK(kif->pfik_ifp);
		TAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
			pfi_kif_update((struct pfi_kif *)
			    ifgl->ifgl_group->ifg_pf_kif);
		IF_ADDR_RUNLOCK(kif->pfik_ifp);
	}
}

static void
pfi_dynaddr_update(struct pfi_dynaddr *dyn)
{
	struct pfi_kif		*kif;
	struct pfr_ktable	*kt;

	PF_RULES_WASSERT();
	KASSERT(dyn && dyn->pfid_kif && dyn->pfid_kt,
	    ("%s: bad argument", __func__));

	kif = dyn->pfid_kif;
	kt = dyn->pfid_kt;

	if (kt->pfrkt_larg != V_pfi_update) {
		/* this table needs to be brought up-to-date */
		pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
		kt->pfrkt_larg = V_pfi_update;
	}
	pfr_dynaddr_update(kt, dyn);
}

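/*
 * Rebuild the contents of a dynamic-address table: gather the matching
 * addresses of the interface (or of every member of the group) into
 * V_pfi_buffer and replace the table's contents with pfr_set_addrs().
 */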
static void
pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, int net, int flags)
{
	int			 e, size2 = 0;
	struct ifg_member	*ifgm;

	V_pfi_buffer_cnt = 0;

	if (kif->pfik_ifp != NULL)
		pfi_instance_add(kif->pfik_ifp, net, flags);
	else if (kif->pfik_group != NULL) {
		IFNET_RLOCK_NOSLEEP();
		TAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
			pfi_instance_add(ifgm->ifgm_ifp, net, flags);
		IFNET_RUNLOCK_NOSLEEP();
	}

	if ((e = pfr_set_addrs(&kt->pfrkt_t, V_pfi_buffer, V_pfi_buffer_cnt, &size2,
	    NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
		printf("%s: cannot set %d new addresses into table %s: %d\n",
		    __func__, V_pfi_buffer_cnt, kt->pfrkt_name, e);
}

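/*
 * Walk an interface's address list and append every address that matches
 * the dynamic address modifiers (:network, :broadcast, :peer, :0) and the
 * requested prefix handling to the scratch buffer.
 */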
static void
pfi_instance_add(struct ifnet *ifp, int net, int flags)
{
	struct ifaddr	*ia;
	int		 got4 = 0, got6 = 0;
	int		 net2, af;

	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr == NULL)
			continue;
		af = ia->ifa_addr->sa_family;
		if (af != AF_INET && af != AF_INET6)
			continue;
		/*
		 * XXX: For point-to-point interfaces with (ifname:0) and
		 *      IPv4, skip addresses that lack a proper route, to
		 *      work around ppp not fully removing the address it
		 *      used during IPCP.
		 */
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
		    !(ia->ifa_flags & IFA_ROUTE) &&
		    (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) &&
		    !(ifp->if_flags & IFF_BROADCAST))
			continue;
		if ((flags & PFI_AFLAG_PEER) &&
		    !(ifp->if_flags & IFF_POINTOPOINT))
			continue;
		if ((flags & PFI_AFLAG_NETWORK) && af == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(
		    &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
			continue;
		if (flags & PFI_AFLAG_NOALIAS) {
			if (af == AF_INET && got4)
				continue;
			if (af == AF_INET6 && got6)
				continue;
		}
		if (af == AF_INET)
			got4 = 1;
		else if (af == AF_INET6)
			got6 = 1;
		net2 = net;
		if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
			if (af == AF_INET)
				net2 = pfi_unmask(&((struct sockaddr_in *)
				    ia->ifa_netmask)->sin_addr);
			else if (af == AF_INET6)
				net2 = pfi_unmask(&((struct sockaddr_in6 *)
				    ia->ifa_netmask)->sin6_addr);
		}
		if (af == AF_INET && net2 > 32)
			net2 = 32;
		if (flags & PFI_AFLAG_BROADCAST)
			pfi_address_add(ia->ifa_broadaddr, af, net2);
		else if (flags & PFI_AFLAG_PEER)
			pfi_address_add(ia->ifa_dstaddr, af, net2);
		else
			pfi_address_add(ia->ifa_addr, af, net2);
	}
	IF_ADDR_RUNLOCK(ifp);
}

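/*
 * Append one address to the scratch buffer, growing it (doubling, up to
 * PFI_BUFFER_MAX entries) as needed.  The allocation uses M_NOWAIT because
 * the caller may hold non-sleepable locks (e.g. IFNET_RLOCK_NOSLEEP() in
 * pfi_table_update()); on failure the address is dropped with a diagnostic
 * printf.
 */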
static void
pfi_address_add(struct sockaddr *sa, int af, int net)
{
	struct pfr_addr	*p;
	int		 i;

	if (V_pfi_buffer_cnt >= V_pfi_buffer_max) {
		int		 new_max = V_pfi_buffer_max * 2;

		if (new_max > PFI_BUFFER_MAX) {
			printf("%s: address buffer full (%d/%d)\n", __func__,
			    V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		p = malloc(new_max * sizeof(*V_pfi_buffer), PFI_MTYPE,
		    M_NOWAIT);
		if (p == NULL) {
			printf("%s: no memory to grow buffer (%d/%d)\n",
			    __func__, V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		memcpy(p, V_pfi_buffer, V_pfi_buffer_max * sizeof(*V_pfi_buffer));
		/* no need to zero buffer */
		free(V_pfi_buffer, PFI_MTYPE);
		V_pfi_buffer = p;
		V_pfi_buffer_max = new_max;
	}
	if (af == AF_INET && net > 32)
		net = 128;
	p = V_pfi_buffer + V_pfi_buffer_cnt++;
	bzero(p, sizeof(*p));
	p->pfra_af = af;
	p->pfra_net = net;
	if (af == AF_INET)
		p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
	else if (af == AF_INET6) {
		p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
			p->pfra_ip6addr.s6_addr16[1] = 0;
	}
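	/*
	 * Note: the byte-wise masking below indexes p directly, which
	 * relies on the address union (pfra_u) being the first member of
	 * struct pfr_addr.
	 */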
	/* mask network address bits */
	if (net < 128)
		((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
	for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
		((caddr_t)p)[i] = 0;
}

void
pfi_dynaddr_remove(struct pfi_dynaddr *dyn)
{

	KASSERT(dyn->pfid_kif != NULL, ("%s: null pfid_kif", __func__));
	KASSERT(dyn->pfid_kt != NULL, ("%s: null pfid_kt", __func__));

	TAILQ_REMOVE(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	pfi_kif_unref(dyn->pfid_kif);
	pfr_detach_table(dyn->pfid_kt);
	free(dyn, PFI_MTYPE);
}

void
pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
{

	KASSERT(aw->type == PF_ADDR_DYNIFTL,
	    ("%s: type %u", __func__, aw->type));

	if (aw->p.dyn == NULL || aw->p.dyn->pfid_kif == NULL)
		return;
	aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
}

static int
pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q)
{
	return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
}

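/*
 * Aggregate pf counters for an interface name or group name into *pfs, or,
 * when pfs is NULL, clear the per-interface counters instead.  For a group,
 * all member interfaces are included.
 */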
void
pfi_update_status(const char *name, struct pf_status *pfs)
{
	struct pfi_kif		*p;
	struct pfi_kif_cmp	 key;
	struct ifg_member	 p_member, *ifgm;
	TAILQ_HEAD(, ifg_member) ifg_members;
	int			 i, j, k;

	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
	p = RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kif *)&key);
	if (p == NULL)
		return;

	if (p->pfik_group != NULL) {
		bcopy(&p->pfik_group->ifg_members, &ifg_members,
		    sizeof(ifg_members));
	} else {
		/* build a temporary list for p only */
		bzero(&p_member, sizeof(p_member));
		p_member.ifgm_ifp = p->pfik_ifp;
		TAILQ_INIT(&ifg_members);
		TAILQ_INSERT_TAIL(&ifg_members, &p_member, ifgm_next);
	}
	if (pfs) {
		bzero(pfs->pcounters, sizeof(pfs->pcounters));
		bzero(pfs->bcounters, sizeof(pfs->bcounters));
	}
	TAILQ_FOREACH(ifgm, &ifg_members, ifgm_next) {
		if (ifgm->ifgm_ifp == NULL || ifgm->ifgm_ifp->if_pf_kif == NULL)
			continue;
		p = (struct pfi_kif *)ifgm->ifgm_ifp->if_pf_kif;

		/* just clear statistics */
		if (pfs == NULL) {
			bzero(p->pfik_packets, sizeof(p->pfik_packets));
			bzero(p->pfik_bytes, sizeof(p->pfik_bytes));
			p->pfik_tzero = time_second;
			continue;
		}
		for (i = 0; i < 2; i++)
			for (j = 0; j < 2; j++)
				for (k = 0; k < 2; k++) {
					pfs->pcounters[i][j][k] +=
						p->pfik_packets[i][j][k];
					pfs->bcounters[i][j] +=
						p->pfik_bytes[i][j][k];
				}
	}
}

void
pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
{
	struct pfi_kif	*p, *nextp;
	int		 n = 0;

	for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
		if (pfi_skip_if(name, p))
			continue;
		if (*size <= n++)
			break;
		if (!p->pfik_tzero)
			p->pfik_tzero = time_second;
		bcopy(p, buf++, sizeof(*buf));
	}
	*size = n;
}

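/*
 * Should this kif be skipped for the given name filter?  An empty filter
 * matches everything and an exact name match is always accepted; otherwise
 * the filter is treated as a prefix (e.g. a driver or group name such as
 * "em") and only kif names that continue with a digit ("em0", "em1", ...)
 * are accepted.  Returns non-zero when the kif should be skipped.
 */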
static int
pfi_skip_if(const char *filter, struct pfi_kif *p)
{
	int	n;

	if (filter == NULL || !*filter)
		return (0);
	if (!strcmp(p->pfik_name, filter))
		return (0);	/* exact match */
	n = strlen(filter);
	if (n < 1 || n >= IFNAMSIZ)
		return (1);	/* sanity check */
	if (filter[n-1] >= '0' && filter[n-1] <= '9')
		return (1);	/* only do exact match in that case */
	if (strncmp(p->pfik_name, filter, n))
		return (1);	/* prefix doesn't match */
	return (p->pfik_name[n] < '0' || p->pfik_name[n] > '9');
}

int
pfi_set_flags(const char *name, int flags)
{
	struct pfi_kif	*p;

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags |= flags;
	}
	return (0);
}

int
pfi_clear_flags(const char *name, int flags)
{
	struct pfi_kif	*p;

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags &= ~flags;
	}
	return (0);
}

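/* Convert a contiguous address mask into a prefix length. */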
/* from pf_print_state.c */
static int
pfi_unmask(void *addr)
{
	struct pf_addr *m = addr;
	int i = 31, j = 0, b = 0;
	u_int32_t tmp;

	while (j < 4 && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	if (j < 4) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}

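/*
 * The eventhandler callbacks below fire for interface and group changes in
 * any vnet.  Each one switches to the vnet the event belongs to and bails
 * out early when pf is not active in that vnet, to avoid racing with vnet
 * teardown.
 */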
static void
pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{

	CURVNET_SET(ifp->if_vnet);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}
	pfi_attach_ifnet(ifp);
#ifdef ALTQ
	PF_RULES_WLOCK();
	pf_altq_ifnet_event(ifp, 0);
	PF_RULES_WUNLOCK();
#endif
	CURVNET_RESTORE();
}

static void
pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct pfi_kif *kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL)
		return;

	CURVNET_SET(ifp->if_vnet);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}
	PF_RULES_WLOCK();
	V_pfi_update++;
	pfi_kif_update(kif);

	kif->pfik_ifp = NULL;
	ifp->if_pf_kif = NULL;
#ifdef ALTQ
	pf_altq_ifnet_event(ifp, 1);
#endif
	PF_RULES_WUNLOCK();
	CURVNET_RESTORE();
}

static void
pfi_attach_group_event(void *arg, struct ifg_group *ifg)
{

	CURVNET_SET((struct vnet *)arg);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}
	pfi_attach_ifgroup(ifg);
	CURVNET_RESTORE();
}

static void
pfi_change_group_event(void *arg, char *gname)
{
	struct pfi_kif *kif;

	CURVNET_SET((struct vnet *)arg);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}

	kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
	PF_RULES_WLOCK();
	V_pfi_update++;
	kif = pfi_kif_attach(kif, gname);
	pfi_kif_update(kif);
	PF_RULES_WUNLOCK();
	CURVNET_RESTORE();
}

static void
pfi_detach_group_event(void *arg, struct ifg_group *ifg)
{
	struct pfi_kif *kif = (struct pfi_kif *)ifg->ifg_pf_kif;

	if (kif == NULL)
		return;

	CURVNET_SET((struct vnet *)arg);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}
	PF_RULES_WLOCK();
	V_pfi_update++;

	kif->pfik_group = NULL;
	ifg->ifg_pf_kif = NULL;
	PF_RULES_WUNLOCK();
	CURVNET_RESTORE();
}

static void
pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{
	if (ifp->if_pf_kif == NULL)
		return;

	CURVNET_SET(ifp->if_vnet);
	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		CURVNET_RESTORE();
		return;
	}
	PF_RULES_WLOCK();
	if (ifp && ifp->if_pf_kif) {
		V_pfi_update++;
		pfi_kif_update(ifp->if_pf_kif);
	}
	PF_RULES_WUNLOCK();
	CURVNET_RESTORE();
}
}
923