xref: /freebsd/sys/netpfil/pf/if_pfsync.c (revision 3b68c491d37196bb76a95bce3c02f7c6d5ba22fd)
1 /*-
2  * SPDX-License-Identifier: (BSD-2-Clause AND ISC)
3  *
4  * Copyright (c) 2002 Michael Shalayeff
5  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*-
31  * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
32  *
33  * Permission to use, copy, modify, and distribute this software for any
34  * purpose with or without fee is hereby granted, provided that the above
35  * copyright notice and this permission notice appear in all copies.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44  */
45 
46 /*
47  * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
48  *
49  * Revisions picked from OpenBSD after revision 1.110 import:
50  * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
51  * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
52  * 1.120, 1.175 - use monotonic time_uptime
53  * 1.122 - reduce number of updates for non-TCP sessions
54  * 1.125, 1.127 - rewrite merge or stale processing
55  * 1.128 - cleanups
56  * 1.146 - bzero() mbuf before sparsely filling it with data
57  * 1.170 - SIOCSIFMTU checks
58  * 1.126, 1.142 - deferred packets processing
59  * 1.173 - correct expire time processing
60  */
61 
62 #include <sys/cdefs.h>
63 #include "opt_inet.h"
64 #include "opt_inet6.h"
65 #include "opt_pf.h"
66 
67 #include <sys/param.h>
68 #include <sys/bus.h>
69 #include <sys/endian.h>
70 #include <sys/interrupt.h>
71 #include <sys/kernel.h>
72 #include <sys/lock.h>
73 #include <sys/mbuf.h>
74 #include <sys/module.h>
75 #include <sys/mutex.h>
76 #include <sys/nv.h>
77 #include <sys/priv.h>
78 #include <sys/smp.h>
79 #include <sys/socket.h>
80 #include <sys/sockio.h>
81 #include <sys/sysctl.h>
82 #include <sys/syslog.h>
83 
84 #include <net/bpf.h>
85 #include <net/if.h>
86 #include <net/if_var.h>
87 #include <net/if_clone.h>
88 #include <net/if_private.h>
89 #include <net/if_types.h>
90 #include <net/vnet.h>
91 #include <net/pfvar.h>
92 #include <net/route.h>
93 #include <net/if_pfsync.h>
94 
95 #include <netinet/if_ether.h>
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet6/in6_var.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip6.h>
101 #include <netinet/ip_carp.h>
102 #include <netinet/ip_var.h>
103 #include <netinet/tcp.h>
104 #include <netinet/tcp_fsm.h>
105 #include <netinet/tcp_seq.h>
106 
107 #include <netinet/ip6.h>
108 #include <netinet6/ip6_var.h>
109 #include <netinet6/scope6_var.h>
110 
111 #include <netpfil/pf/pfsync_nv.h>
112 
113 struct pfsync_bucket;
114 struct pfsync_softc;
115 
116 union inet_template {
117 	struct ip	ipv4;
118 	struct ip6_hdr	ipv6;
119 };
120 
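/*
 * Smallest possible pfsync packet: the address family header template
 * (sized for the larger of IPv4/IPv6), the pfsync header and one
 * subheader, which pfsync_sendout() uses for the terminating EOF record.
 */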
121 #define PFSYNC_MINPKT ( \
122 	sizeof(union inet_template) + \
123 	sizeof(struct pfsync_header) + \
124 	sizeof(struct pfsync_subheader) )
125 
126 static int	pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
127 		    struct pfsync_state_peer *);
128 static int	pfsync_in_clr(struct mbuf *, int, int, int, int);
129 static int	pfsync_in_ins(struct mbuf *, int, int, int, int);
130 static int	pfsync_in_iack(struct mbuf *, int, int, int, int);
131 static int	pfsync_in_upd(struct mbuf *, int, int, int, int);
132 static int	pfsync_in_upd_c(struct mbuf *, int, int, int, int);
133 static int	pfsync_in_ureq(struct mbuf *, int, int, int, int);
134 static int	pfsync_in_del_c(struct mbuf *, int, int, int, int);
135 static int	pfsync_in_bus(struct mbuf *, int, int, int, int);
136 static int	pfsync_in_tdb(struct mbuf *, int, int, int, int);
137 static int	pfsync_in_eof(struct mbuf *, int, int, int, int);
138 static int	pfsync_in_error(struct mbuf *, int, int, int, int);
139 
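/*
 * Each input handler returns the number of bytes it consumed from the
 * packet, or -1 to abort processing of the packet, in which case the
 * mbuf has already been freed.
 */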
140 static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = {
141 	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
142 	pfsync_in_ins,			/* PFSYNC_ACT_INS_1301 */
143 	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
144 	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1301 */
145 	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
146 	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
147 	pfsync_in_error,		/* PFSYNC_ACT_DEL */
148 	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
149 	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
150 	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
151 	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
152 	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
153 	pfsync_in_eof,			/* PFSYNC_ACT_EOF */
154 	pfsync_in_ins,			/* PFSYNC_ACT_INS_1400 */
155 	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1400 */
156 };
157 
158 struct pfsync_q {
159 	void		(*write)(struct pf_kstate *, void *);
160 	size_t		len;
161 	u_int8_t	action;
162 };
163 
164 /* We have the following sync queues */
165 enum pfsync_q_id {
166 	PFSYNC_Q_INS_1301,
167 	PFSYNC_Q_INS_1400,
168 	PFSYNC_Q_IACK,
169 	PFSYNC_Q_UPD_1301,
170 	PFSYNC_Q_UPD_1400,
171 	PFSYNC_Q_UPD_C,
172 	PFSYNC_Q_DEL_C,
173 	PFSYNC_Q_COUNT,
174 };
175 
176 /* Functions for building messages for a given queue */
177 static void	pfsync_out_state_1301(struct pf_kstate *, void *);
178 static void	pfsync_out_state_1400(struct pf_kstate *, void *);
179 static void	pfsync_out_iack(struct pf_kstate *, void *);
180 static void	pfsync_out_upd_c(struct pf_kstate *, void *);
181 static void	pfsync_out_del_c(struct pf_kstate *, void *);
182 
183 /* Attach those functions to their queues */
184 static struct pfsync_q pfsync_qs[] = {
185 	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 },
186 	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 },
187 	{ pfsync_out_iack,       sizeof(struct pfsync_ins_ack),    PFSYNC_ACT_INS_ACK },
188 	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 },
189 	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 },
190 	{ pfsync_out_upd_c,      sizeof(struct pfsync_upd_c),      PFSYNC_ACT_UPD_C },
191 	{ pfsync_out_del_c,      sizeof(struct pfsync_del_c),      PFSYNC_ACT_DEL_C }
192 };
193 
194 /* Map queue to pf_kstate->sync_state */
195 static u_int8_t pfsync_qid_sstate[] = {
196 	PFSYNC_S_INS,   /* PFSYNC_Q_INS_1301 */
197 	PFSYNC_S_INS,   /* PFSYNC_Q_INS_1400 */
198 	PFSYNC_S_IACK,  /* PFSYNC_Q_IACK */
199 	PFSYNC_S_UPD,   /* PFSYNC_Q_UPD_1301 */
200 	PFSYNC_S_UPD,   /* PFSYNC_Q_UPD_1400 */
201 	PFSYNC_S_UPD_C, /* PFSYNC_Q_UPD_C */
202 	PFSYNC_S_DEL_C, /* PFSYNC_Q_DEL_C */
203 };
204 
205 /* Map pf_kstate->sync_state to queue */
206 static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t);
207 
208 static void	pfsync_q_ins(struct pf_kstate *, int sync_state, bool);
209 static void	pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);
210 
211 static void	pfsync_update_state(struct pf_kstate *);
212 static void	pfsync_tx(struct pfsync_softc *, struct mbuf *);
213 
214 struct pfsync_upd_req_item {
215 	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
216 	struct pfsync_upd_req			ur_msg;
217 };
218 
219 struct pfsync_deferral {
220 	struct pfsync_softc		*pd_sc;
221 	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
222 	struct callout			pd_tmo;
223 
224 	struct pf_kstate		*pd_st;
225 	struct mbuf			*pd_m;
226 };
227 
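/*
 * Outbound work is sharded across buckets (two per CPU by default, see
 * pfsync_clone_create()), each with its own mutex, state queues,
 * deferral list and interface send queue.
 */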
228 struct pfsync_bucket
229 {
230 	int			b_id;
231 	struct pfsync_softc	*b_sc;
232 	struct mtx		b_mtx;
233 	struct callout		b_tmo;
234 	int			b_flags;
235 #define	PFSYNCF_BUCKET_PUSH	0x00000001
236 
237 	size_t			b_len;
238 	TAILQ_HEAD(, pf_kstate)			b_qs[PFSYNC_Q_COUNT];
239 	TAILQ_HEAD(, pfsync_upd_req_item)	b_upd_req_list;
240 	TAILQ_HEAD(, pfsync_deferral)		b_deferrals;
241 	u_int			b_deferred;
242 	uint8_t			*b_plus;
243 	size_t			b_pluslen;
244 
245 	struct  ifaltq b_snd;
246 };
247 
248 struct pfsync_softc {
249 	/* Configuration */
250 	struct ifnet		*sc_ifp;
251 	struct ifnet		*sc_sync_if;
252 	struct ip_moptions	sc_imo;
253 	struct ip6_moptions	sc_im6o;
254 	struct sockaddr_storage	sc_sync_peer;
255 	uint32_t		sc_flags;
256 	uint8_t			sc_maxupdates;
257 	union inet_template     sc_template;
258 	struct mtx		sc_mtx;
259 	uint32_t		sc_version;
260 
261 	/* Queued data */
262 	struct pfsync_bucket	*sc_buckets;
263 
264 	/* Bulk update info */
265 	struct mtx		sc_bulk_mtx;
266 	uint32_t		sc_ureq_sent;
267 	int			sc_bulk_tries;
268 	uint32_t		sc_ureq_received;
269 	int			sc_bulk_hashid;
270 	uint64_t		sc_bulk_stateid;
271 	uint32_t		sc_bulk_creatorid;
272 	struct callout		sc_bulk_tmo;
273 	struct callout		sc_bulkfail_tmo;
274 };
275 
276 #define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
277 #define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
278 #define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
279 
280 #define PFSYNC_BUCKET_LOCK(b)		mtx_lock(&(b)->b_mtx)
281 #define PFSYNC_BUCKET_UNLOCK(b)		mtx_unlock(&(b)->b_mtx)
282 #define PFSYNC_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->b_mtx, MA_OWNED)
283 
284 #define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
285 #define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
286 #define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)
287 
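/* Default timeout for deferred packets, in milliseconds. */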
288 #define PFSYNC_DEFER_TIMEOUT	20
289 
290 static const char pfsyncname[] = "pfsync";
291 static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
292 VNET_DEFINE_STATIC(struct pfsync_softc	*, pfsyncif) = NULL;
293 #define	V_pfsyncif		VNET(pfsyncif)
294 VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
295 #define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
296 VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
297 #define	V_pfsync_swi_ie		VNET(pfsync_swi_ie)
298 VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
299 #define	V_pfsyncstats		VNET(pfsyncstats)
300 VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
301 #define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
302 VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT;
303 #define	V_pfsync_defer_timeout	VNET(pfsync_defer_timeout)
304 
305 static void	pfsync_timeout(void *);
306 static void	pfsync_push(struct pfsync_bucket *);
307 static void	pfsync_push_all(struct pfsync_softc *);
308 static void	pfsyncintr(void *);
309 static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
310 		    struct in_mfilter *, struct in6_mfilter *);
311 static void	pfsync_multicast_cleanup(struct pfsync_softc *);
312 static void	pfsync_pointers_init(void);
313 static void	pfsync_pointers_uninit(void);
314 static int	pfsync_init(void);
315 static void	pfsync_uninit(void);
316 
317 static unsigned long pfsync_buckets;
318 
319 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
320     "PFSYNC");
321 SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
322     &VNET_NAME(pfsyncstats), pfsyncstats,
323     "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
324 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
325     &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
326 SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
327     &pfsync_buckets, 0, "Number of pfsync hash buckets");
328 SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
329     &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)");
330 
331 static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
332 static void	pfsync_clone_destroy(struct ifnet *);
333 static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
334 		    struct pf_state_peer *);
335 static int	pfsyncoutput(struct ifnet *, struct mbuf *,
336 		    const struct sockaddr *, struct route *);
337 static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
338 
339 static int	pfsync_defer(struct pf_kstate *, struct mbuf *);
340 static void	pfsync_undefer(struct pfsync_deferral *, int);
341 static void	pfsync_undefer_state_locked(struct pf_kstate *, int);
342 static void	pfsync_undefer_state(struct pf_kstate *, int);
343 static void	pfsync_defer_tmo(void *);
344 
345 static void	pfsync_request_update(u_int32_t, u_int64_t);
346 static bool	pfsync_update_state_req(struct pf_kstate *);
347 
348 static void	pfsync_drop(struct pfsync_softc *);
349 static void	pfsync_sendout(int, int);
350 static void	pfsync_send_plus(void *, size_t);
351 
352 static void	pfsync_bulk_start(void);
353 static void	pfsync_bulk_status(u_int8_t);
354 static void	pfsync_bulk_update(void *);
355 static void	pfsync_bulk_fail(void *);
356 
357 static void	pfsync_detach_ifnet(struct ifnet *);
358 
359 static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *,
360     struct pfsync_kstatus *);
361 static int pfsync_kstatus_to_softc(struct pfsync_kstatus *,
362     struct pfsync_softc *);
363 
364 #ifdef IPSEC
365 static void	pfsync_update_net_tdb(struct pfsync_tdb *);
366 #endif
367 static struct pfsync_bucket	*pfsync_get_bucket(struct pfsync_softc *,
368 		    struct pf_kstate *);
369 
370 #define PFSYNC_MAX_BULKTRIES	12
371 
372 VNET_DEFINE(struct if_clone *, pfsync_cloner);
373 #define	V_pfsync_cloner	VNET(pfsync_cloner)
374 
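/* ff12::f0, the link-local scope multicast group used for pfsync over IPv6. */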
375 const struct in6_addr in6addr_linklocal_pfsync_group =
376 	{{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377 	    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}};
378 static int
379 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
380 {
381 	struct pfsync_softc *sc;
382 	struct ifnet *ifp;
383 	struct pfsync_bucket *b;
384 	int c;
385 	enum pfsync_q_id q;
386 
387 	if (unit != 0)
388 		return (EINVAL);
389 
390 	if (! pfsync_buckets)
391 		pfsync_buckets = mp_ncpus * 2;
392 
393 	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
394 	sc->sc_flags |= PFSYNCF_OK;
395 	sc->sc_maxupdates = 128;
396 	sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
397 
398 	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
399 	if_initname(ifp, pfsyncname, unit);
400 	ifp->if_softc = sc;
401 	ifp->if_ioctl = pfsyncioctl;
402 	ifp->if_output = pfsyncoutput;
403 	ifp->if_type = IFT_PFSYNC;
404 	ifp->if_hdrlen = sizeof(struct pfsync_header);
405 	ifp->if_mtu = ETHERMTU;
406 	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
407 	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
408 	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
409 	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);
410 
411 	if_attach(ifp);
412 
413 	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
414 
415 	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
416 	    M_PFSYNC, M_ZERO | M_WAITOK);
417 	for (c = 0; c < pfsync_buckets; c++) {
418 		b = &sc->sc_buckets[c];
419 		mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);
420 
421 		b->b_id = c;
422 		b->b_sc = sc;
423 		b->b_len = PFSYNC_MINPKT;
424 
425 		for (q = 0; q < PFSYNC_Q_COUNT; q++)
426 			TAILQ_INIT(&b->b_qs[q]);
427 
428 		TAILQ_INIT(&b->b_upd_req_list);
429 		TAILQ_INIT(&b->b_deferrals);
430 
431 		callout_init(&b->b_tmo, 1);
432 
433 		b->b_snd.ifq_maxlen = ifqmaxlen;
434 	}
435 
436 	V_pfsyncif = sc;
437 
438 	return (0);
439 }
440 
441 static void
442 pfsync_clone_destroy(struct ifnet *ifp)
443 {
444 	struct pfsync_softc *sc = ifp->if_softc;
445 	struct pfsync_bucket *b;
446 	int c, ret;
447 
448 	for (c = 0; c < pfsync_buckets; c++) {
449 		b = &sc->sc_buckets[c];
450 		/*
451 		 * At this stage, everything should have already been
452 		 * cleared by pfsync_uninit(), and we have only to
453 		 * drain callouts.
454 		 */
455 		PFSYNC_BUCKET_LOCK(b);
456 		while (b->b_deferred > 0) {
457 			struct pfsync_deferral *pd =
458 			    TAILQ_FIRST(&b->b_deferrals);
459 
460 			ret = callout_stop(&pd->pd_tmo);
461 			PFSYNC_BUCKET_UNLOCK(b);
462 			if (ret > 0) {
463 				pfsync_undefer(pd, 1);
464 			} else {
465 				callout_drain(&pd->pd_tmo);
466 			}
467 			PFSYNC_BUCKET_LOCK(b);
468 		}
469 		MPASS(b->b_deferred == 0);
470 		MPASS(TAILQ_EMPTY(&b->b_deferrals));
471 		PFSYNC_BUCKET_UNLOCK(b);
472 
473 		free(b->b_plus, M_PFSYNC);
474 		b->b_plus = NULL;
475 		b->b_pluslen = 0;
476 
477 		callout_drain(&b->b_tmo);
478 	}
479 
480 	callout_drain(&sc->sc_bulkfail_tmo);
481 	callout_drain(&sc->sc_bulk_tmo);
482 
483 	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
484 		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
485 	bpfdetach(ifp);
486 	if_detach(ifp);
487 
488 	pfsync_drop(sc);
489 
490 	if_free(ifp);
491 	pfsync_multicast_cleanup(sc);
492 	mtx_destroy(&sc->sc_mtx);
493 	mtx_destroy(&sc->sc_bulk_mtx);
494 
495 	free(sc->sc_buckets, M_PFSYNC);
496 	free(sc, M_PFSYNC);
497 
498 	V_pfsyncif = NULL;
499 }
500 
501 static int
502 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
503     struct pf_state_peer *d)
504 {
505 	if (s->scrub.scrub_flag && d->scrub == NULL) {
506 		d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
507 		if (d->scrub == NULL)
508 			return (ENOMEM);
509 	}
510 
511 	return (0);
512 }
513 
514 static int
515 pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
516 {
517 	struct pfsync_softc *sc = V_pfsyncif;
518 #ifndef	__NO_STRICT_ALIGNMENT
519 	struct pfsync_state_key key[2];
520 #endif
521 	struct pfsync_state_key *kw, *ks;
522 	struct pf_kstate	*st = NULL;
523 	struct pf_state_key *skw = NULL, *sks = NULL;
524 	struct pf_krule *r = NULL;
525 	struct pfi_kkif	*kif;
526 	int error;
527 
528 	PF_RULES_RASSERT();
529 
530 	if (sp->pfs_1301.creatorid == 0) {
531 		if (V_pf_status.debug >= PF_DEBUG_MISC)
532 			printf("%s: invalid creator id: %08x\n", __func__,
533 			    ntohl(sp->pfs_1301.creatorid));
534 		return (EINVAL);
535 	}
536 
537 	if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
538 		if (V_pf_status.debug >= PF_DEBUG_MISC)
539 			printf("%s: unknown interface: %s\n", __func__,
540 			    sp->pfs_1301.ifname);
541 		if (flags & PFSYNC_SI_IOCTL)
542 			return (EINVAL);
543 		return (0);	/* skip this state */
544 	}
545 
546 	/*
547 	 * If the ruleset checksums match or the state is coming from the ioctl,
548 	 * it's safe to associate the state with the rule of that number.
549 	 */
550 	if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) &&
551 	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) <
552 	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
553 		r = pf_main_ruleset.rules[
554 		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)];
555 	else
556 		r = &V_pf_default_rule;
557 
558 	if ((r->max_states &&
559 	    counter_u64_fetch(r->states_cur) >= r->max_states))
560 		goto cleanup;
561 
562 	/*
563 	 * XXXGL: consider M_WAITOK in ioctl path after.
564 	 */
565 	st = pf_alloc_state(M_NOWAIT);
566 	if (__predict_false(st == NULL))
567 		goto cleanup;
568 
569 	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
570 		goto cleanup;
571 
572 #ifndef	__NO_STRICT_ALIGNMENT
573 	bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2);
574 	kw = &key[PF_SK_WIRE];
575 	ks = &key[PF_SK_STACK];
576 #else
577 	kw = &sp->pfs_1301.key[PF_SK_WIRE];
578 	ks = &sp->pfs_1301.key[PF_SK_STACK];
579 #endif
580 
581 	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
582 	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
583 	    kw->port[0] != ks->port[0] ||
584 	    kw->port[1] != ks->port[1]) {
585 		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
586 		if (sks == NULL)
587 			goto cleanup;
588 	} else
589 		sks = skw;
590 
591 	/* allocate memory for scrub info */
592 	if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) ||
593 	    pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst))
594 		goto cleanup;
595 
596 	/* Copy to state key(s). */
597 	skw->addr[0] = kw->addr[0];
598 	skw->addr[1] = kw->addr[1];
599 	skw->port[0] = kw->port[0];
600 	skw->port[1] = kw->port[1];
601 	skw->proto = sp->pfs_1301.proto;
602 	skw->af = sp->pfs_1301.af;
603 	if (sks != skw) {
604 		sks->addr[0] = ks->addr[0];
605 		sks->addr[1] = ks->addr[1];
606 		sks->port[0] = ks->port[0];
607 		sks->port[1] = ks->port[1];
608 		sks->proto = sp->pfs_1301.proto;
609 		sks->af = sp->pfs_1301.af;
610 	}
611 
612 	/* copy to state */
613 	bcopy(&sp->pfs_1301.rt_addr, &st->rt_addr, sizeof(st->rt_addr));
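	/*
	 * The creation field is carried as the state's age in seconds;
	 * convert it back into an uptime-based timestamp in milliseconds.
	 */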
614 	st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000;
615 	st->expire = pf_get_uptime();
616 	if (sp->pfs_1301.expire) {
617 		uint32_t timeout;
618 
619 		timeout = r->timeout[sp->pfs_1301.timeout];
620 		if (!timeout)
621 			timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout];
622 
623 		/* sp->expire may have been adaptively scaled by export. */
624 		st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
625 	}
626 
627 	st->direction = sp->pfs_1301.direction;
628 	st->act.log = sp->pfs_1301.log;
629 	st->timeout = sp->pfs_1301.timeout;
630 
631 	switch (msg_version) {
632 		case PFSYNC_MSG_VERSION_1301:
633 			st->state_flags = sp->pfs_1301.state_flags;
634 			/*
635 			 * In FreeBSD 13 pfsync lacks many attributes. Copy them
636 			 * from the rule if possible. If the rule can't be matched,
637 			 * clear any set options, as we can't recover their
638 			 * parameters.
639 			 */
640 			if (r == &V_pf_default_rule) {
641 				st->state_flags &= ~PFSTATE_SETMASK;
642 			} else {
643 				/*
644 				 * Similar to pf_rule_to_actions(). This code
645 				 * won't set the actions properly if they come
646 				 * from multiple "match" rules, as only the rule
647 				 * creating the state is sent over pfsync.
648 				 */
649 				st->act.qid = r->qid;
650 				st->act.pqid = r->pqid;
651 				st->act.rtableid = r->rtableid;
652 				if (r->scrub_flags & PFSTATE_SETTOS)
653 					st->act.set_tos = r->set_tos;
654 				st->act.min_ttl = r->min_ttl;
655 				st->act.max_mss = r->max_mss;
656 				st->state_flags |= (r->scrub_flags &
657 				    (PFSTATE_NODF|PFSTATE_RANDOMID|
658 				    PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|
659 				    PFSTATE_SETPRIO));
660 				if (r->dnpipe || r->dnrpipe) {
661 					if (r->free_flags & PFRULE_DN_IS_PIPE)
662 						st->state_flags |= PFSTATE_DN_IS_PIPE;
663 					else
664 						st->state_flags &= ~PFSTATE_DN_IS_PIPE;
665 				}
666 				st->act.dnpipe = r->dnpipe;
667 				st->act.dnrpipe = r->dnrpipe;
668 			}
669 			break;
670 		case PFSYNC_MSG_VERSION_1400:
671 			st->state_flags = ntohs(sp->pfs_1400.state_flags);
672 			st->act.qid = ntohs(sp->pfs_1400.qid);
673 			st->act.pqid = ntohs(sp->pfs_1400.pqid);
674 			st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
675 			st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
676 			st->act.rtableid = ntohl(sp->pfs_1400.rtableid);
677 			st->act.min_ttl = sp->pfs_1400.min_ttl;
678 			st->act.set_tos = sp->pfs_1400.set_tos;
679 			st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
680 			st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
681 			st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
682 			st->rt = sp->pfs_1400.rt;
683 			if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) {
684 				if (V_pf_status.debug >= PF_DEBUG_MISC)
685 					printf("%s: unknown route interface: %s\n",
686 					    __func__, sp->pfs_1400.rt_ifname);
687 				if (flags & PFSYNC_SI_IOCTL)
688 					error = EINVAL;
689 				else
690 					error = 0;
691 				goto cleanup_keys;
692 			}
693 			break;
694 		default:
695 			panic("%s: Unsupported pfsync_msg_version %d",
696 			    __func__, msg_version);
697 	}
698 
699 	st->id = sp->pfs_1301.id;
700 	st->creatorid = sp->pfs_1301.creatorid;
701 	pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
702 	pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
703 
704 	st->rule.ptr = r;
705 	st->nat_rule.ptr = NULL;
706 	st->anchor.ptr = NULL;
707 
708 	st->pfsync_time = time_uptime;
709 	st->sync_state = PFSYNC_S_NONE;
710 
711 	if (!(flags & PFSYNC_SI_IOCTL))
712 		st->state_flags |= PFSTATE_NOSYNC;
713 
714 	if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
715 		goto cleanup_state;
716 
717 	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
718 	counter_u64_add(r->states_cur, 1);
719 	counter_u64_add(r->states_tot, 1);
720 
721 	if (!(flags & PFSYNC_SI_IOCTL)) {
722 		st->state_flags &= ~PFSTATE_NOSYNC;
723 		if (st->state_flags & PFSTATE_ACK) {
724 			struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
725 			PFSYNC_BUCKET_LOCK(b);
726 			pfsync_q_ins(st, PFSYNC_S_IACK, true);
727 			PFSYNC_BUCKET_UNLOCK(b);
728 
729 			pfsync_push_all(sc);
730 		}
731 	}
732 	st->state_flags &= ~PFSTATE_ACK;
733 	PF_STATE_UNLOCK(st);
734 
735 	return (0);
736 
737 cleanup:
738 	error = ENOMEM;
739 cleanup_keys:
740 	if (skw == sks)
741 		sks = NULL;
742 	uma_zfree(V_pf_state_key_z, skw);
743 	uma_zfree(V_pf_state_key_z, sks);
744 
745 cleanup_state:	/* pf_state_insert() frees the state keys. */
746 	if (st) {
747 		st->timeout = PFTM_UNLINKED; /* appease an assert */
748 		pf_free_state(st);
749 	}
750 	return (error);
751 }
752 
753 #ifdef INET
754 static int
755 pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
756 {
757 	struct pfsync_softc *sc = V_pfsyncif;
758 	struct mbuf *m = *mp;
759 	struct ip *ip = mtod(m, struct ip *);
760 	struct pfsync_header *ph;
761 	struct pfsync_subheader subh;
762 
763 	int offset, len, flags = 0;
764 	int rv;
765 	uint16_t count;
766 
767 	PF_RULES_RLOCK_TRACKER;
768 
769 	*mp = NULL;
770 	V_pfsyncstats.pfsyncs_ipackets++;
771 
772 	/* Verify that we have a sync interface configured. */
773 	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
774 	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
775 		goto done;
776 
777 	/* verify that the packet came in on the right interface */
778 	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
779 		V_pfsyncstats.pfsyncs_badif++;
780 		goto done;
781 	}
782 
783 	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
784 	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
785 	/* verify that the IP TTL is 255. */
786 	if (ip->ip_ttl != PFSYNC_DFLTTL) {
787 		V_pfsyncstats.pfsyncs_badttl++;
788 		goto done;
789 	}
790 
791 	offset = ip->ip_hl << 2;
792 	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
793 		V_pfsyncstats.pfsyncs_hdrops++;
794 		goto done;
795 	}
796 
797 	if (offset + sizeof(*ph) > m->m_len) {
798 		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
799 			V_pfsyncstats.pfsyncs_hdrops++;
800 			return (IPPROTO_DONE);
801 		}
802 		ip = mtod(m, struct ip *);
803 	}
804 	ph = (struct pfsync_header *)((char *)ip + offset);
805 
806 	/* verify the version */
807 	if (ph->version != PFSYNC_VERSION) {
808 		V_pfsyncstats.pfsyncs_badver++;
809 		goto done;
810 	}
811 
812 	len = ntohs(ph->len) + offset;
813 	if (m->m_pkthdr.len < len) {
814 		V_pfsyncstats.pfsyncs_badlen++;
815 		goto done;
816 	}
817 
818 	/*
819 	 * Trusting pf_chksum during packet processing, as well as searching
820 	 * the interface name tree, requires holding PF_RULES_RLOCK().
821 	 */
822 	PF_RULES_RLOCK();
823 	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
824 		flags = PFSYNC_SI_CKSUM;
825 
826 	offset += sizeof(*ph);
827 	while (offset <= len - sizeof(subh)) {
828 		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
829 		offset += sizeof(subh);
830 
831 		if (subh.action >= PFSYNC_ACT_MAX) {
832 			V_pfsyncstats.pfsyncs_badact++;
833 			PF_RULES_RUNLOCK();
834 			goto done;
835 		}
836 
837 		count = ntohs(subh.count);
838 		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
839 		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
840 		if (rv == -1) {
841 			PF_RULES_RUNLOCK();
842 			return (IPPROTO_DONE);
843 		}
844 
845 		offset += rv;
846 	}
847 	PF_RULES_RUNLOCK();
848 
849 done:
850 	m_freem(m);
851 	return (IPPROTO_DONE);
852 }
853 #endif
854 
855 #ifdef INET6
856 static int
857 pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused)
858 {
859 	struct pfsync_softc *sc = V_pfsyncif;
860 	struct mbuf *m = *mp;
861 	struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
862 	struct pfsync_header *ph;
863 	struct pfsync_subheader subh;
864 
865 	int offset, len, flags = 0;
866 	int rv;
867 	uint16_t count;
868 
869 	PF_RULES_RLOCK_TRACKER;
870 
871 	*mp = NULL;
872 	V_pfsyncstats.pfsyncs_ipackets++;
873 
874 	/* Verify that we have a sync interface configured. */
875 	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
876 	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
877 		goto done;
878 
879 	/* verify that the packet came in on the right interface */
880 	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
881 		V_pfsyncstats.pfsyncs_badif++;
882 		goto done;
883 	}
884 
885 	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
886 	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
887 	/* verify that the IPv6 hop limit is 255. */
888 	if (ip6->ip6_hlim != PFSYNC_DFLTTL) {
889 		V_pfsyncstats.pfsyncs_badttl++;
890 		goto done;
891 	}
892 
893 
894 	offset = sizeof(*ip6);
895 	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
896 		V_pfsyncstats.pfsyncs_hdrops++;
897 		goto done;
898 	}
899 
900 	if (offset + sizeof(*ph) > m->m_len) {
901 		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
902 			V_pfsyncstats.pfsyncs_hdrops++;
903 			return (IPPROTO_DONE);
904 		}
905 		ip6 = mtod(m, struct ip6_hdr *);
906 	}
907 	ph = (struct pfsync_header *)((char *)ip6 + offset);
908 
909 	/* verify the version */
910 	if (ph->version != PFSYNC_VERSION) {
911 		V_pfsyncstats.pfsyncs_badver++;
912 		goto done;
913 	}
914 
915 	len = ntohs(ph->len) + offset;
916 	if (m->m_pkthdr.len < len) {
917 		V_pfsyncstats.pfsyncs_badlen++;
918 		goto done;
919 	}
920 
921 	/*
922 	 * Trusting pf_chksum during packet processing, as well as searching
923 	 * the interface name tree, requires holding PF_RULES_RLOCK().
924 	 */
925 	PF_RULES_RLOCK();
926 	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
927 		flags = PFSYNC_SI_CKSUM;
928 
929 	offset += sizeof(*ph);
930 	while (offset <= len - sizeof(subh)) {
931 		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
932 		offset += sizeof(subh);
933 
934 		if (subh.action >= PFSYNC_ACT_MAX) {
935 			V_pfsyncstats.pfsyncs_badact++;
936 			PF_RULES_RUNLOCK();
937 			goto done;
938 		}
939 
940 		count = ntohs(subh.count);
941 		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
942 		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
943 		if (rv == -1) {
944 			PF_RULES_RUNLOCK();
945 			return (IPPROTO_DONE);
946 		}
947 
948 		offset += rv;
949 	}
950 	PF_RULES_RUNLOCK();
951 
952 done:
953 	m_freem(m);
954 	return (IPPROTO_DONE);
955 }
956 #endif
957 
958 static int
959 pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action)
960 {
961 	struct pfsync_clr *clr;
962 	struct mbuf *mp;
963 	int len = sizeof(*clr) * count;
964 	int i, offp;
965 	u_int32_t creatorid;
966 
967 	mp = m_pulldown(m, offset, len, &offp);
968 	if (mp == NULL) {
969 		V_pfsyncstats.pfsyncs_badlen++;
970 		return (-1);
971 	}
972 	clr = (struct pfsync_clr *)(mp->m_data + offp);
973 
974 	for (i = 0; i < count; i++) {
975 		creatorid = clr[i].creatorid;
976 
977 		if (clr[i].ifname[0] != '\0' &&
978 		    pfi_kkif_find(clr[i].ifname) == NULL)
979 			continue;
980 
981 		for (int i = 0; i <= pf_hashmask; i++) {
982 			struct pf_idhash *ih = &V_pf_idhash[i];
983 			struct pf_kstate *s;
984 relock:
985 			PF_HASHROW_LOCK(ih);
986 			LIST_FOREACH(s, &ih->states, entry) {
987 				if (s->creatorid == creatorid) {
988 					s->state_flags |= PFSTATE_NOSYNC;
989 					pf_unlink_state(s);
990 					goto relock;
991 				}
992 			}
993 			PF_HASHROW_UNLOCK(ih);
994 		}
995 	}
996 
997 	return (len);
998 }
999 
1000 static int
1001 pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
1002 {
1003 	struct mbuf *mp;
1004 	union pfsync_state_union *sa, *sp;
1005 	int i, offp, total_len, msg_version, msg_len;
1006 
1007 	switch (action) {
1008 		case PFSYNC_ACT_INS_1301:
1009 			msg_len = sizeof(struct pfsync_state_1301);
1010 			total_len = msg_len * count;
1011 			msg_version = PFSYNC_MSG_VERSION_1301;
1012 			break;
1013 		case PFSYNC_ACT_INS_1400:
1014 			msg_len = sizeof(struct pfsync_state_1400);
1015 			total_len = msg_len * count;
1016 			msg_version = PFSYNC_MSG_VERSION_1400;
1017 			break;
1018 		default:
1019 			V_pfsyncstats.pfsyncs_badact++;
1020 			return (-1);
1021 	}
1022 
1023 	mp = m_pulldown(m, offset, total_len, &offp);
1024 	if (mp == NULL) {
1025 		V_pfsyncstats.pfsyncs_badlen++;
1026 		return (-1);
1027 	}
1028 	sa = (union pfsync_state_union *)(mp->m_data + offp);
1029 
1030 	for (i = 0; i < count; i++) {
1031 		sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
1032 
1033 		/* Check for invalid values. */
1034 		if (sp->pfs_1301.timeout >= PFTM_MAX ||
1035 		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
1036 		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST ||
1037 		    sp->pfs_1301.direction > PF_OUT ||
1038 		    (sp->pfs_1301.af != AF_INET &&
1039 		    sp->pfs_1301.af != AF_INET6)) {
1040 			if (V_pf_status.debug >= PF_DEBUG_MISC)
1041 				printf("%s: invalid value\n", __func__);
1042 			V_pfsyncstats.pfsyncs_badval++;
1043 			continue;
1044 		}
1045 
1046 		if (pfsync_state_import(sp, flags, msg_version) == ENOMEM)
1047 			/* Drop out, but process the rest of the actions. */
1048 			break;
1049 	}
1050 
1051 	return (total_len);
1052 }
1053 
1054 static int
1055 pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
1056 {
1057 	struct pfsync_ins_ack *ia, *iaa;
1058 	struct pf_kstate *st;
1059 
1060 	struct mbuf *mp;
1061 	int len = count * sizeof(*ia);
1062 	int offp, i;
1063 
1064 	mp = m_pulldown(m, offset, len, &offp);
1065 	if (mp == NULL) {
1066 		V_pfsyncstats.pfsyncs_badlen++;
1067 		return (-1);
1068 	}
1069 	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
1070 
1071 	for (i = 0; i < count; i++) {
1072 		ia = &iaa[i];
1073 
1074 		st = pf_find_state_byid(ia->id, ia->creatorid);
1075 		if (st == NULL)
1076 			continue;
1077 
1078 		if (st->state_flags & PFSTATE_ACK) {
1079 			pfsync_undefer_state(st, 0);
1080 		}
1081 		PF_STATE_UNLOCK(st);
1082 	}
1083 	/*
1084 	 * XXX this is not yet implemented, but we know the size of the
1085 	 * message so we can skip it.
1086 	 */
1087 
1088 	return (count * sizeof(struct pfsync_ins_ack));
1089 }
1090 
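/*
 * Merge a peer's view of a TCP state into our own.  Returns the number
 * of directions (0-2) in which the local state is more recent than the
 * update; callers treat a non-zero result as a stale update and answer
 * it by scheduling an update of their own.
 */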
1091 static int
1092 pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
1093     struct pfsync_state_peer *dst)
1094 {
1095 	int sync = 0;
1096 
1097 	PF_STATE_LOCK_ASSERT(st);
1098 
1099 	/*
1100 	 * The state should never go backwards except
1101 	 * for syn-proxy states.  Neither should the
1102 	 * sequence window slide backwards.
1103 	 */
1104 	if ((st->src.state > src->state &&
1105 	    (st->src.state < PF_TCPS_PROXY_SRC ||
1106 	    src->state >= PF_TCPS_PROXY_SRC)) ||
1107 
1108 	    (st->src.state == src->state &&
1109 	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
1110 		sync++;
1111 	else
1112 		pf_state_peer_ntoh(src, &st->src);
1113 
1114 	if ((st->dst.state > dst->state) ||
1115 
1116 	    (st->dst.state >= TCPS_SYN_SENT &&
1117 	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
1118 		sync++;
1119 	else
1120 		pf_state_peer_ntoh(dst, &st->dst);
1121 
1122 	return (sync);
1123 }
1124 
1125 static int
1126 pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
1127 {
1128 	struct pfsync_softc *sc = V_pfsyncif;
1129 	union pfsync_state_union *sa, *sp;
1130 	struct pf_kstate *st;
1131 	struct mbuf *mp;
1132 	int sync, offp, i, total_len, msg_len, msg_version;
1133 
1134 	switch (action) {
1135 		case PFSYNC_ACT_UPD_1301:
1136 			msg_len = sizeof(struct pfsync_state_1301);
1137 			total_len = msg_len * count;
1138 			msg_version = PFSYNC_MSG_VERSION_1301;
1139 			break;
1140 		case PFSYNC_ACT_UPD_1400:
1141 			msg_len = sizeof(struct pfsync_state_1400);
1142 			total_len = msg_len * count;
1143 			msg_version = PFSYNC_MSG_VERSION_1400;
1144 			break;
1145 		default:
1146 			V_pfsyncstats.pfsyncs_badact++;
1147 			return (-1);
1148 	}
1149 
1150 	mp = m_pulldown(m, offset, total_len, &offp);
1151 	if (mp == NULL) {
1152 		V_pfsyncstats.pfsyncs_badlen++;
1153 		return (-1);
1154 	}
1155 	sa = (union pfsync_state_union *)(mp->m_data + offp);
1156 
1157 	for (i = 0; i < count; i++) {
1158 		sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
1159 
1160 		/* check for invalid values */
1161 		if (sp->pfs_1301.timeout >= PFTM_MAX ||
1162 		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
1163 		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) {
1164 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1165 				printf("pfsync_input: PFSYNC_ACT_UPD: "
1166 				    "invalid value\n");
1167 			}
1168 			V_pfsyncstats.pfsyncs_badval++;
1169 			continue;
1170 		}
1171 
1172 		st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid);
1173 		if (st == NULL) {
1174 			/* insert the update */
1175 			if (pfsync_state_import(sp, flags, msg_version))
1176 				V_pfsyncstats.pfsyncs_badstate++;
1177 			continue;
1178 		}
1179 
1180 		if (st->state_flags & PFSTATE_ACK) {
1181 			pfsync_undefer_state(st, 1);
1182 		}
1183 
1184 		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
1185 			sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst);
1186 		else {
1187 			sync = 0;
1188 
1189 			/*
1190 			 * Non-TCP protocol state machines always go
1191 			 * forwards.
1192 			 */
1193 			if (st->src.state > sp->pfs_1301.src.state)
1194 				sync++;
1195 			else
1196 				pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
1197 			if (st->dst.state > sp->pfs_1301.dst.state)
1198 				sync++;
1199 			else
1200 				pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
1201 		}
1202 		if (sync < 2) {
1203 			pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst);
1204 			pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
1205 			st->expire = pf_get_uptime();
1206 			st->timeout = sp->pfs_1301.timeout;
1207 		}
1208 		st->pfsync_time = time_uptime;
1209 
1210 		if (sync) {
1211 			V_pfsyncstats.pfsyncs_stale++;
1212 
1213 			pfsync_update_state(st);
1214 			PF_STATE_UNLOCK(st);
1215 			pfsync_push_all(sc);
1216 			continue;
1217 		}
1218 		PF_STATE_UNLOCK(st);
1219 	}
1220 
1221 	return (total_len);
1222 }
1223 
1224 static int
1225 pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action)
1226 {
1227 	struct pfsync_softc *sc = V_pfsyncif;
1228 	struct pfsync_upd_c *ua, *up;
1229 	struct pf_kstate *st;
1230 	int len = count * sizeof(*up);
1231 	int sync;
1232 	struct mbuf *mp;
1233 	int offp, i;
1234 
1235 	mp = m_pulldown(m, offset, len, &offp);
1236 	if (mp == NULL) {
1237 		V_pfsyncstats.pfsyncs_badlen++;
1238 		return (-1);
1239 	}
1240 	ua = (struct pfsync_upd_c *)(mp->m_data + offp);
1241 
1242 	for (i = 0; i < count; i++) {
1243 		up = &ua[i];
1244 
1245 		/* check for invalid values */
1246 		if (up->timeout >= PFTM_MAX ||
1247 		    up->src.state > PF_TCPS_PROXY_DST ||
1248 		    up->dst.state > PF_TCPS_PROXY_DST) {
1249 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1250 				printf("pfsync_input: "
1251 				    "PFSYNC_ACT_UPD_C: "
1252 				    "invalid value\n");
1253 			}
1254 			V_pfsyncstats.pfsyncs_badval++;
1255 			continue;
1256 		}
1257 
1258 		st = pf_find_state_byid(up->id, up->creatorid);
1259 		if (st == NULL) {
1260 			/* We don't have this state. Ask for it. */
1261 			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
1262 			pfsync_request_update(up->creatorid, up->id);
1263 			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
1264 			continue;
1265 		}
1266 
1267 		if (st->state_flags & PFSTATE_ACK) {
1268 			pfsync_undefer_state(st, 1);
1269 		}
1270 
1271 		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
1272 			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
1273 		else {
1274 			sync = 0;
1275 
1276 			/*
1277 			 * Non-TCP protocol state machines always go
1278 			 * forwards.
1279 			 */
1280 			if (st->src.state > up->src.state)
1281 				sync++;
1282 			else
1283 				pf_state_peer_ntoh(&up->src, &st->src);
1284 			if (st->dst.state > up->dst.state)
1285 				sync++;
1286 			else
1287 				pf_state_peer_ntoh(&up->dst, &st->dst);
1288 		}
1289 		if (sync < 2) {
1290 			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1291 			pf_state_peer_ntoh(&up->dst, &st->dst);
1292 			st->expire = pf_get_uptime();
1293 			st->timeout = up->timeout;
1294 		}
1295 		st->pfsync_time = time_uptime;
1296 
1297 		if (sync) {
1298 			V_pfsyncstats.pfsyncs_stale++;
1299 
1300 			pfsync_update_state(st);
1301 			PF_STATE_UNLOCK(st);
1302 			pfsync_push_all(sc);
1303 			continue;
1304 		}
1305 		PF_STATE_UNLOCK(st);
1306 	}
1307 
1308 	return (len);
1309 }
1310 
1311 static int
1312 pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action)
1313 {
1314 	struct pfsync_upd_req *ur, *ura;
1315 	struct mbuf *mp;
1316 	int len = count * sizeof(*ur);
1317 	int i, offp;
1318 
1319 	struct pf_kstate *st;
1320 
1321 	mp = m_pulldown(m, offset, len, &offp);
1322 	if (mp == NULL) {
1323 		V_pfsyncstats.pfsyncs_badlen++;
1324 		return (-1);
1325 	}
1326 	ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1327 
1328 	for (i = 0; i < count; i++) {
1329 		ur = &ura[i];
1330 
1331 		if (ur->id == 0 && ur->creatorid == 0)
1332 			pfsync_bulk_start();
1333 		else {
1334 			st = pf_find_state_byid(ur->id, ur->creatorid);
1335 			if (st == NULL) {
1336 				V_pfsyncstats.pfsyncs_badstate++;
1337 				continue;
1338 			}
1339 			if (st->state_flags & PFSTATE_NOSYNC) {
1340 				PF_STATE_UNLOCK(st);
1341 				continue;
1342 			}
1343 
1344 			pfsync_update_state_req(st);
1345 			PF_STATE_UNLOCK(st);
1346 		}
1347 	}
1348 
1349 	return (len);
1350 }
1351 
1352 static int
1353 pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action)
1354 {
1355 	struct mbuf *mp;
1356 	struct pfsync_del_c *sa, *sp;
1357 	struct pf_kstate *st;
1358 	int len = count * sizeof(*sp);
1359 	int offp, i;
1360 
1361 	mp = m_pulldown(m, offset, len, &offp);
1362 	if (mp == NULL) {
1363 		V_pfsyncstats.pfsyncs_badlen++;
1364 		return (-1);
1365 	}
1366 	sa = (struct pfsync_del_c *)(mp->m_data + offp);
1367 
1368 	for (i = 0; i < count; i++) {
1369 		sp = &sa[i];
1370 
1371 		st = pf_find_state_byid(sp->id, sp->creatorid);
1372 		if (st == NULL) {
1373 			V_pfsyncstats.pfsyncs_badstate++;
1374 			continue;
1375 		}
1376 
1377 		st->state_flags |= PFSTATE_NOSYNC;
1378 		pf_unlink_state(st);
1379 	}
1380 
1381 	return (len);
1382 }
1383 
1384 static int
1385 pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action)
1386 {
1387 	struct pfsync_softc *sc = V_pfsyncif;
1388 	struct pfsync_bus *bus;
1389 	struct mbuf *mp;
1390 	int len = count * sizeof(*bus);
1391 	int offp;
1392 
1393 	PFSYNC_BLOCK(sc);
1394 
1395 	/* If we're not waiting for a bulk update, who cares. */
1396 	if (sc->sc_ureq_sent == 0) {
1397 		PFSYNC_BUNLOCK(sc);
1398 		return (len);
1399 	}
1400 
1401 	mp = m_pulldown(m, offset, len, &offp);
1402 	if (mp == NULL) {
1403 		PFSYNC_BUNLOCK(sc);
1404 		V_pfsyncstats.pfsyncs_badlen++;
1405 		return (-1);
1406 	}
1407 	bus = (struct pfsync_bus *)(mp->m_data + offp);
1408 
1409 	switch (bus->status) {
1410 	case PFSYNC_BUS_START:
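		/*
		 * Arm the bulk-fail timeout: four seconds plus one tick for
		 * every packet-sized batch of states the peer may have to
		 * send at the current state limit.
		 */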
1411 		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
1412 		    V_pf_limits[PF_LIMIT_STATES].limit /
1413 		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
1414 		    sizeof(union pfsync_state_union)),
1415 		    pfsync_bulk_fail, sc);
1416 		if (V_pf_status.debug >= PF_DEBUG_MISC)
1417 			printf("pfsync: received bulk update start\n");
1418 		break;
1419 
1420 	case PFSYNC_BUS_END:
1421 		if (time_uptime - ntohl(bus->endtime) >=
1422 		    sc->sc_ureq_sent) {
1423 			/* that's it, we're happy */
1424 			sc->sc_ureq_sent = 0;
1425 			sc->sc_bulk_tries = 0;
1426 			callout_stop(&sc->sc_bulkfail_tmo);
1427 			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
1428 				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
1429 				    "pfsync bulk done");
1430 			sc->sc_flags |= PFSYNCF_OK;
1431 			if (V_pf_status.debug >= PF_DEBUG_MISC)
1432 				printf("pfsync: received valid "
1433 				    "bulk update end\n");
1434 		} else {
1435 			if (V_pf_status.debug >= PF_DEBUG_MISC)
1436 				printf("pfsync: received invalid "
1437 				    "bulk update end: bad timestamp\n");
1438 		}
1439 		break;
1440 	}
1441 	PFSYNC_BUNLOCK(sc);
1442 
1443 	return (len);
1444 }
1445 
1446 static int
1447 pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action)
1448 {
1449 	int len = count * sizeof(struct pfsync_tdb);
1450 
1451 #if defined(IPSEC)
1452 	struct pfsync_tdb *tp;
1453 	struct mbuf *mp;
1454 	int offp;
1455 	int i;
1456 	int s;
1457 
1458 	mp = m_pulldown(m, offset, len, &offp);
1459 	if (mp == NULL) {
1460 		V_pfsyncstats.pfsyncs_badlen++;
1461 		return (-1);
1462 	}
1463 	tp = (struct pfsync_tdb *)(mp->m_data + offp);
1464 
1465 	for (i = 0; i < count; i++)
1466 		pfsync_update_net_tdb(&tp[i]);
1467 #endif
1468 
1469 	return (len);
1470 }
1471 
1472 #if defined(IPSEC)
1473 /* Update an in-kernel tdb. Silently fail if no tdb is found. */
1474 static void
1475 pfsync_update_net_tdb(struct pfsync_tdb *pt)
1476 {
1477 	struct tdb		*tdb;
1478 	int			 s;
1479 
1480 	/* check for invalid values */
1481 	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1482 	    (pt->dst.sa.sa_family != AF_INET &&
1483 	    pt->dst.sa.sa_family != AF_INET6))
1484 		goto bad;
1485 
1486 	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1487 	if (tdb) {
1488 		pt->rpl = ntohl(pt->rpl);
1489 		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);
1490 
1491 		/* Neither replay nor byte counter should ever decrease. */
1492 		if (pt->rpl < tdb->tdb_rpl ||
1493 		    pt->cur_bytes < tdb->tdb_cur_bytes) {
1494 			goto bad;
1495 		}
1496 
1497 		tdb->tdb_rpl = pt->rpl;
1498 		tdb->tdb_cur_bytes = pt->cur_bytes;
1499 	}
1500 	return;
1501 
1502 bad:
1503 	if (V_pf_status.debug >= PF_DEBUG_MISC)
1504 		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1505 		    "invalid value\n");
1506 	V_pfsyncstats.pfsyncs_badstate++;
1507 	return;
1508 }
1509 #endif
1510 
1511 static int
1512 pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action)
1513 {
1514 	/* check if we are at the right place in the packet */
1515 	if (offset != m->m_pkthdr.len)
1516 		V_pfsyncstats.pfsyncs_badlen++;
1517 
1518 	/* we're done. free and let the caller return */
1519 	m_freem(m);
1520 	return (-1);
1521 }
1522 
1523 static int
1524 pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action)
1525 {
1526 	V_pfsyncstats.pfsyncs_badact++;
1527 
1528 	m_freem(m);
1529 	return (-1);
1530 }
1531 
1532 static int
1533 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
1534 	struct route *rt)
1535 {
1536 	m_freem(m);
1537 	return (0);
1538 }
1539 
1540 /* ARGSUSED */
1541 static int
1542 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1543 {
1544 	struct pfsync_softc *sc = ifp->if_softc;
1545 	struct ifreq *ifr = (struct ifreq *)data;
1546 	struct pfsyncreq pfsyncr;
1547 	size_t nvbuflen;
1548 	int error;
1549 	int c;
1550 
1551 	switch (cmd) {
1552 	case SIOCSIFFLAGS:
1553 		PFSYNC_LOCK(sc);
1554 		if (ifp->if_flags & IFF_UP) {
1555 			ifp->if_drv_flags |= IFF_DRV_RUNNING;
1556 			PFSYNC_UNLOCK(sc);
1557 			pfsync_pointers_init();
1558 		} else {
1559 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1560 			PFSYNC_UNLOCK(sc);
1561 			pfsync_pointers_uninit();
1562 		}
1563 		break;
1564 	case SIOCSIFMTU:
1565 		if (!sc->sc_sync_if ||
1566 		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
1567 		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1568 			return (EINVAL);
1569 		if (ifr->ifr_mtu < ifp->if_mtu) {
1570 			for (c = 0; c < pfsync_buckets; c++) {
1571 				PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1572 				if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
1573 					pfsync_sendout(1, c);
1574 				PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1575 			}
1576 		}
1577 		ifp->if_mtu = ifr->ifr_mtu;
1578 		break;
1579 	case SIOCGETPFSYNC:
1580 		bzero(&pfsyncr, sizeof(pfsyncr));
1581 		PFSYNC_LOCK(sc);
1582 		if (sc->sc_sync_if) {
1583 			strlcpy(pfsyncr.pfsyncr_syncdev,
1584 			    sc->sc_sync_if->if_xname, IFNAMSIZ);
1585 		}
1586 		pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
1587 		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1588 		pfsyncr.pfsyncr_defer = sc->sc_flags;
1589 		PFSYNC_UNLOCK(sc);
1590 		return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
1591 		    sizeof(pfsyncr)));
1592 
1593 	case SIOCGETPFSYNCNV:
1594 	    {
1595 		nvlist_t *nvl_syncpeer;
1596 		nvlist_t *nvl = nvlist_create(0);
1597 
1598 		if (nvl == NULL)
1599 			return (ENOMEM);
1600 
1601 		if (sc->sc_sync_if)
1602 			nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname);
1603 		nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates);
1604 		nvlist_add_number(nvl, "flags", sc->sc_flags);
1605 		nvlist_add_number(nvl, "version", sc->sc_version);
1606 		if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL)
1607 			nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer);
1608 
1609 		void *packed = NULL;
1610 		packed = nvlist_pack(nvl, &nvbuflen);
1611 		if (packed == NULL) {
1612 			free(packed, M_NVLIST);
1613 			nvlist_destroy(nvl);
1614 			return (ENOMEM);
1615 		}
1616 
1617 		if (nvbuflen > ifr->ifr_cap_nv.buf_length) {
1618 			ifr->ifr_cap_nv.length = nvbuflen;
1619 			ifr->ifr_cap_nv.buffer = NULL;
1620 			free(packed, M_NVLIST);
1621 			nvlist_destroy(nvl);
1622 			return (EFBIG);
1623 		}
1624 
1625 		ifr->ifr_cap_nv.length = nvbuflen;
1626 		error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen);
1627 
1628 		nvlist_destroy(nvl);
1629 		nvlist_destroy(nvl_syncpeer);
1630 		free(packed, M_NVLIST);
1631 		break;
1632 	    }
1633 
1634 	case SIOCSETPFSYNC:
1635 	    {
1636 		struct pfsync_kstatus status;
1637 
1638 		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1639 			return (error);
1640 		if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
1641 		    sizeof(pfsyncr))))
1642 			return (error);
1643 
1644 		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
1645 		pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status);
1646 
1647 		error = pfsync_kstatus_to_softc(&status, sc);
1648 		return (error);
1649 	    }
1650 	case SIOCSETPFSYNCNV:
1651 	    {
1652 		struct pfsync_kstatus status;
1653 		void *data;
1654 		nvlist_t *nvl;
1655 
1656 		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1657 			return (error);
1658 		if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
1659 			return (EINVAL);
1660 
1661 		data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);
1662 
1663 		if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
1664 		    ifr->ifr_cap_nv.length)) != 0) {
1665 			free(data, M_TEMP);
1666 			return (error);
1667 		}
1668 
1669 		if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
1670 			free(data, M_TEMP);
1671 			return (EINVAL);
1672 		}
1673 
1674 		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
1675 		pfsync_nvstatus_to_kstatus(nvl, &status);
1676 
1677 		nvlist_destroy(nvl);
1678 		free(data, M_TEMP);
1679 
1680 		error = pfsync_kstatus_to_softc(&status, sc);
1681 		return (error);
1682 	    }
1683 	default:
1684 		return (ENOTTY);
1685 	}
1686 
1687 	return (0);
1688 }
1689 
1690 static void
1691 pfsync_out_state_1301(struct pf_kstate *st, void *buf)
1692 {
1693 	union pfsync_state_union *sp = buf;
1694 
1695 	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1301);
1696 }
1697 
1698 static void
1699 pfsync_out_state_1400(struct pf_kstate *st, void *buf)
1700 {
1701 	union pfsync_state_union *sp = buf;
1702 
1703 	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1400);
1704 }
1705 
1706 static void
1707 pfsync_out_iack(struct pf_kstate *st, void *buf)
1708 {
1709 	struct pfsync_ins_ack *iack = buf;
1710 
1711 	iack->id = st->id;
1712 	iack->creatorid = st->creatorid;
1713 }
1714 
1715 static void
1716 pfsync_out_upd_c(struct pf_kstate *st, void *buf)
1717 {
1718 	struct pfsync_upd_c *up = buf;
1719 
1720 	bzero(up, sizeof(*up));
1721 	up->id = st->id;
1722 	pf_state_peer_hton(&st->src, &up->src);
1723 	pf_state_peer_hton(&st->dst, &up->dst);
1724 	up->creatorid = st->creatorid;
1725 	up->timeout = st->timeout;
1726 }
1727 
1728 static void
1729 pfsync_out_del_c(struct pf_kstate *st, void *buf)
1730 {
1731 	struct pfsync_del_c *dp = buf;
1732 
1733 	dp->id = st->id;
1734 	dp->creatorid = st->creatorid;
1735 	st->state_flags |= PFSTATE_NOSYNC;
1736 }
1737 
1738 static void
1739 pfsync_drop(struct pfsync_softc *sc)
1740 {
1741 	struct pf_kstate *st, *next;
1742 	struct pfsync_upd_req_item *ur;
1743 	struct pfsync_bucket *b;
1744 	int c;
1745 	enum pfsync_q_id q;
1746 
1747 	for (c = 0; c < pfsync_buckets; c++) {
1748 		b = &sc->sc_buckets[c];
1749 		for (q = 0; q < PFSYNC_Q_COUNT; q++) {
1750 			if (TAILQ_EMPTY(&b->b_qs[q]))
1751 				continue;
1752 
1753 			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
1754 				KASSERT(st->sync_state == pfsync_qid_sstate[q],
1755 					("%s: st->sync_state == q",
1756 						__func__));
1757 				st->sync_state = PFSYNC_S_NONE;
1758 				pf_release_state(st);
1759 			}
1760 			TAILQ_INIT(&b->b_qs[q]);
1761 		}
1762 
1763 		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1764 			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1765 			free(ur, M_PFSYNC);
1766 		}
1767 
1768 		b->b_len = PFSYNC_MINPKT;
1769 		free(b->b_plus, M_PFSYNC);
1770 		b->b_plus = NULL;
1771 		b->b_pluslen = 0;
1772 	}
1773 }
1774 
1775 static void
1776 pfsync_sendout(int schedswi, int c)
1777 {
1778 	struct pfsync_softc *sc = V_pfsyncif;
1779 	struct ifnet *ifp = sc->sc_ifp;
1780 	struct mbuf *m;
1781 	struct pfsync_header *ph;
1782 	struct pfsync_subheader *subh;
1783 	struct pf_kstate *st, *st_next;
1784 	struct pfsync_upd_req_item *ur;
1785 	struct pfsync_bucket *b = &sc->sc_buckets[c];
1786 	size_t len;
1787 	int aflen, offset, count = 0;
1788 	enum pfsync_q_id q;
1789 
1790 	KASSERT(sc != NULL, ("%s: null sc", __func__));
1791 	KASSERT(b->b_len > PFSYNC_MINPKT,
1792 	    ("%s: sc_len %zu", __func__, b->b_len));
1793 	PFSYNC_BUCKET_LOCK_ASSERT(b);
1794 
1795 	if (!bpf_peers_present(ifp->if_bpf) && sc->sc_sync_if == NULL) {
1796 		pfsync_drop(sc);
1797 		return;
1798 	}
1799 
1800 	m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
1801 	if (m == NULL) {
1802 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1803 		V_pfsyncstats.pfsyncs_onomem++;
1804 		return;
1805 	}
1806 	m->m_data += max_linkhdr;
1807 	bzero(m->m_data, b->b_len);
1808 
1809 	len = b->b_len;
1810 
1811 	/* build the ip header */
1812 	switch (sc->sc_sync_peer.ss_family) {
1813 #ifdef INET
1814 	case AF_INET:
1815 	    {
1816 		struct ip *ip;
1817 
1818 		ip = mtod(m, struct ip *);
1819 		bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip));
1820 		aflen = offset = sizeof(*ip);
1821 
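		/*
		 * b_len was budgeted for the full inet_template; give back
		 * the space not used by the smaller IPv4 header.
		 */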
1822 		len -= sizeof(union inet_template) - sizeof(struct ip);
1823 		ip->ip_len = htons(len);
1824 		ip_fillid(ip);
1825 		break;
1826 	    }
1827 #endif
1828 #ifdef INET6
1829 	case AF_INET6:
1830 		{
1831 		struct ip6_hdr *ip6;
1832 
1833 		ip6 = mtod(m, struct ip6_hdr *);
1834 		bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6));
1835 		aflen = offset = sizeof(*ip6);
1836 
1837 		len -= sizeof(union inet_template) - sizeof(struct ip6_hdr);
1838 		ip6->ip6_plen = htons(len);
1839 		break;
1840 		}
1841 #endif
1842 	default:
1843 		m_freem(m);
1844 		return;
1845 	}
1846 	m->m_len = m->m_pkthdr.len = len;
1847 
1848 	/* build the pfsync header */
1849 	ph = (struct pfsync_header *)(m->m_data + offset);
1850 	offset += sizeof(*ph);
1851 
1852 	ph->version = PFSYNC_VERSION;
1853 	ph->len = htons(len - aflen);
1854 	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
1855 
1856 	/* walk the queues */
1857 	for (q = 0; q < PFSYNC_Q_COUNT; q++) {
1858 		if (TAILQ_EMPTY(&b->b_qs[q]))
1859 			continue;
1860 
1861 		subh = (struct pfsync_subheader *)(m->m_data + offset);
1862 		offset += sizeof(*subh);
1863 
1864 		count = 0;
1865 		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
1866 			KASSERT(st->sync_state == pfsync_qid_sstate[q],
1867 				("%s: st->sync_state == q",
1868 					__func__));
1869 			/*
1870 			 * XXXGL: some of the write methods do unlocked reads
1871 			 * of state data :(
1872 			 */
1873 			pfsync_qs[q].write(st, m->m_data + offset);
1874 			offset += pfsync_qs[q].len;
1875 			st->sync_state = PFSYNC_S_NONE;
1876 			pf_release_state(st);
1877 			count++;
1878 		}
1879 		TAILQ_INIT(&b->b_qs[q]);
1880 
1881 		subh->action = pfsync_qs[q].action;
1882 		subh->count = htons(count);
1883 		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
1884 	}
1885 
1886 	if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
1887 		subh = (struct pfsync_subheader *)(m->m_data + offset);
1888 		offset += sizeof(*subh);
1889 
1890 		count = 0;
1891 		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1892 			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1893 
1894 			bcopy(&ur->ur_msg, m->m_data + offset,
1895 			    sizeof(ur->ur_msg));
1896 			offset += sizeof(ur->ur_msg);
1897 			free(ur, M_PFSYNC);
1898 			count++;
1899 		}
1900 
1901 		subh->action = PFSYNC_ACT_UPD_REQ;
1902 		subh->count = htons(count);
1903 		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
1904 	}
1905 
1906 	/* has someone built a custom region for us to add? */
1907 	if (b->b_plus != NULL) {
1908 		bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
1909 		offset += b->b_pluslen;
1910 
1911 		free(b->b_plus, M_PFSYNC);
1912 		b->b_plus = NULL;
1913 		b->b_pluslen = 0;
1914 	}
1915 
1916 	subh = (struct pfsync_subheader *)(m->m_data + offset);
1917 	offset += sizeof(*subh);
1918 
1919 	subh->action = PFSYNC_ACT_EOF;
1920 	subh->count = htons(1);
1921 	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;
1922 
1923 	/* we're done, let's put it on the wire */
1924 	if (bpf_peers_present(ifp->if_bpf)) {
1925 		m->m_data += aflen;
1926 		m->m_len = m->m_pkthdr.len = len - aflen;
1927 		bpf_mtap(ifp->if_bpf, m);
1928 		m->m_data -= aflen;
1929 		m->m_len = m->m_pkthdr.len = len;
1930 	}
1931 
1932 	if (sc->sc_sync_if == NULL) {
1933 		b->b_len = PFSYNC_MINPKT;
1934 		m_freem(m);
1935 		return;
1936 	}
1937 
1938 	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1939 	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1940 	b->b_len = PFSYNC_MINPKT;
1941 
1942 	if (!_IF_QFULL(&b->b_snd))
1943 		_IF_ENQUEUE(&b->b_snd, m);
1944 	else {
1945 		m_freem(m);
1946 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
1947 	}
1948 	if (schedswi)
1949 		swi_sched(V_pfsync_swi_cookie, 0);
1950 }
1951 
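/*
 * A new state has been created: unless syncing is disabled for it (or it
 * describes pfsync traffic itself), queue an insert message and arm the
 * bucket timeout if the pending packet was empty.
 */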
1952 static void
1953 pfsync_insert_state(struct pf_kstate *st)
1954 {
1955 	struct pfsync_softc *sc = V_pfsyncif;
1956 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1957 
1958 	if (st->state_flags & PFSTATE_NOSYNC)
1959 		return;
1960 
1961 	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
1962 	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
1963 		st->state_flags |= PFSTATE_NOSYNC;
1964 		return;
1965 	}
1966 
1967 	KASSERT(st->sync_state == PFSYNC_S_NONE,
1968 		("%s: st->sync_state %u", __func__, st->sync_state));
1969 
1970 	PFSYNC_BUCKET_LOCK(b);
1971 	if (b->b_len == PFSYNC_MINPKT)
1972 		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1973 
1974 	pfsync_q_ins(st, PFSYNC_S_INS, true);
1975 	PFSYNC_BUCKET_UNLOCK(b);
1976 
1977 	st->sync_updates = 0;
1978 }
1979 
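/*
 * Defer transmission of the packet that created a state: stash the mbuf
 * and a state reference in a deferral record, mark the state with
 * PFSTATE_ACK and arm a timeout. Returns 1 if the packet was deferred
 * (the caller must not transmit it) and 0 if deferral does not apply.
 */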
1980 static int
1981 pfsync_defer(struct pf_kstate *st, struct mbuf *m)
1982 {
1983 	struct pfsync_softc *sc = V_pfsyncif;
1984 	struct pfsync_deferral *pd;
1985 	struct pfsync_bucket *b;
1986 
1987 	if (m->m_flags & (M_BCAST|M_MCAST))
1988 		return (0);
1989 
1990 	if (sc == NULL)
1991 		return (0);
1992 
1993 	b = pfsync_get_bucket(sc, st);
1994 
1995 	PFSYNC_LOCK(sc);
1996 
1997 	if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
1998 	    !(sc->sc_flags & PFSYNCF_DEFER)) {
1999 		PFSYNC_UNLOCK(sc);
2000 		return (0);
2001 	}
2002 
2003 	PFSYNC_BUCKET_LOCK(b);
2004 	PFSYNC_UNLOCK(sc);
2005 
2006 	if (b->b_deferred >= 128)
2007 		pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);
2008 
2009 	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
2010 	if (pd == NULL) {
2011 		PFSYNC_BUCKET_UNLOCK(b);
2012 		return (0);
2013 	}
2014 	b->b_deferred++;
2015 
2016 	m->m_flags |= M_SKIP_FIREWALL;
2017 	st->state_flags |= PFSTATE_ACK;
2018 
2019 	pd->pd_sc = sc;
2020 	pd->pd_st = st;
2021 	pf_ref_state(st);
2022 	pd->pd_m = m;
2023 
2024 	TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
2025 	callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
2026 	callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000,
2027 	    pfsync_defer_tmo, pd);
2028 
2029 	pfsync_push(b);
2030 	PFSYNC_BUCKET_UNLOCK(b);
2031 
2032 	return (1);
2033 }
2034 
2035 static void
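/*
 * Tear down a deferral record and either drop the deferred packet or
 * move it onto the bucket's send queue. Called with the bucket locked.
 */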
2036 pfsync_undefer(struct pfsync_deferral *pd, int drop)
2037 {
2038 	struct pfsync_softc *sc = pd->pd_sc;
2039 	struct mbuf *m = pd->pd_m;
2040 	struct pf_kstate *st = pd->pd_st;
2041 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2042 
2043 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2044 
2045 	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
2046 	b->b_deferred--;
2047 	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
2048 	free(pd, M_PFSYNC);
2049 	pf_release_state(st);
2050 
2051 	if (drop)
2052 		m_freem(m);
2053 	else {
2054 		_IF_ENQUEUE(&b->b_snd, m);
2055 		pfsync_push(b);
2056 	}
2057 }
2058 
2059 static void
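/*
 * Deferral timeout: detach the record and transmit the held packet
 * directly, or drop it if the sync interface has gone away. The callout
 * was initialized with CALLOUT_RETURNUNLOCKED, so the bucket lock is
 * released here.
 */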
2060 pfsync_defer_tmo(void *arg)
2061 {
2062 	struct epoch_tracker et;
2063 	struct pfsync_deferral *pd = arg;
2064 	struct pfsync_softc *sc = pd->pd_sc;
2065 	struct mbuf *m = pd->pd_m;
2066 	struct pf_kstate *st = pd->pd_st;
2067 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2068 
2069 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2070 
2071 	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
2072 	b->b_deferred--;
2073 	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
2074 	PFSYNC_BUCKET_UNLOCK(b);
2075 	free(pd, M_PFSYNC);
2076 
2077 	if (sc->sc_sync_if == NULL) {
2078 		pf_release_state(st);
2079 		m_freem(m);
2080 		return;
2081 	}
2082 
2083 	NET_EPOCH_ENTER(et);
2084 	CURVNET_SET(sc->sc_sync_if->if_vnet);
2085 
2086 	pfsync_tx(sc, m);
2087 
2088 	pf_release_state(st);
2089 
2090 	CURVNET_RESTORE();
2091 	NET_EPOCH_EXIT(et);
2092 }
2093 
2094 static void
2095 pfsync_undefer_state_locked(struct pf_kstate *st, int drop)
2096 {
2097 	struct pfsync_softc *sc = V_pfsyncif;
2098 	struct pfsync_deferral *pd;
2099 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2100 
2101 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2102 
2103 	TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
2104 		if (pd->pd_st == st) {
2105 			if (callout_stop(&pd->pd_tmo) > 0)
2106 				pfsync_undefer(pd, drop);
2107 
2108 			return;
2109 		}
2110 	}
2111 
2112 	panic("%s: unable to find deferred state", __func__);
2113 }
2114 
2115 static void
2116 pfsync_undefer_state(struct pf_kstate *st, int drop)
2117 {
2118 	struct pfsync_softc *sc = V_pfsyncif;
2119 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2120 
2121 	PFSYNC_BUCKET_LOCK(b);
2122 	pfsync_undefer_state_locked(st, drop);
2123 	PFSYNC_BUCKET_UNLOCK(b);
2124 }
2125 
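
/*
 * Map a state to its pfsync bucket using the state ID hash.
 */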
2126 static struct pfsync_bucket*
2127 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
2128 {
2129 	int c = PF_IDHASH(st) % pfsync_buckets;
2130 	return &sc->sc_buckets[c];
2131 	return (&sc->sc_buckets[c]);
2132 
2133 static void
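/*
 * A state has changed: make sure a (compressed) update is queued for it.
 * If it is already queued, just count the change; TCP states force a
 * push once sc_maxupdates changes have accumulated.
 */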
2134 pfsync_update_state(struct pf_kstate *st)
2135 {
2136 	struct pfsync_softc *sc = V_pfsyncif;
2137 	bool sync = false, ref = true;
2138 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2139 
2140 	PF_STATE_LOCK_ASSERT(st);
2141 	PFSYNC_BUCKET_LOCK(b);
2142 
2143 	if (st->state_flags & PFSTATE_ACK)
2144 		pfsync_undefer_state_locked(st, 0);
2145 	if (st->state_flags & PFSTATE_NOSYNC) {
2146 		if (st->sync_state != PFSYNC_S_NONE)
2147 			pfsync_q_del(st, true, b);
2148 		PFSYNC_BUCKET_UNLOCK(b);
2149 		return;
2150 	}
2151 
2152 	if (b->b_len == PFSYNC_MINPKT)
2153 		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2154 
2155 	switch (st->sync_state) {
2156 	case PFSYNC_S_UPD_C:
2157 	case PFSYNC_S_UPD:
2158 	case PFSYNC_S_INS:
2159 		/* we're already handling it */
2160 
2161 		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
2162 			st->sync_updates++;
2163 			if (st->sync_updates >= sc->sc_maxupdates)
2164 				sync = true;
2165 		}
2166 		break;
2167 
2168 	case PFSYNC_S_IACK:
2169 		pfsync_q_del(st, false, b);
2170 		ref = false;
2171 		/* FALLTHROUGH */
2172 
2173 	case PFSYNC_S_NONE:
2174 		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
2175 		st->sync_updates = 0;
2176 		break;
2177 
2178 	default:
2179 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2180 	}
2181 
2182 	if (sync || (time_uptime - st->pfsync_time) < 2)
2183 		pfsync_push(b);
2184 
2185 	PFSYNC_BUCKET_UNLOCK(b);
2186 }
2187 
2188 static void
2189 pfsync_request_update(u_int32_t creatorid, u_int64_t id)
2190 {
2191 	struct pfsync_softc *sc = V_pfsyncif;
2192 	struct pfsync_bucket *b = &sc->sc_buckets[0];
2193 	struct pfsync_upd_req_item *item;
2194 	size_t nlen = sizeof(struct pfsync_upd_req);
2195 
2196 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2197 
2198 	/*
2199 	 * This code goes some way towards preventing multiple update requests
2200 	 * for the same state from being generated. It searches the current
2201 	 * subheader queue, but does not look into already packed datagrams.
2202 	 */
2203 	TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
2204 		if (item->ur_msg.id == id &&
2205 		    item->ur_msg.creatorid == creatorid)
2206 			return;
2207 
2208 	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
2209 	if (item == NULL)
2210 		return; /* XXX stats */
2211 
2212 	item->ur_msg.id = id;
2213 	item->ur_msg.creatorid = creatorid;
2214 
2215 	if (TAILQ_EMPTY(&b->b_upd_req_list))
2216 		nlen += sizeof(struct pfsync_subheader);
2217 
2218 	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2219 		pfsync_sendout(0, 0);
2220 
2221 		nlen = sizeof(struct pfsync_subheader) +
2222 		    sizeof(struct pfsync_upd_req);
2223 	}
2224 
2225 	TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
2226 	b->b_len += nlen;
2227 
2228 	pfsync_push(b);
2229 }
2230 
2231 static bool
2232 pfsync_update_state_req(struct pf_kstate *st)
2233 {
2234 	struct pfsync_softc *sc = V_pfsyncif;
2235 	bool ref = true, full = false;
2236 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2237 
2238 	PF_STATE_LOCK_ASSERT(st);
2239 	PFSYNC_BUCKET_LOCK(b);
2240 
2241 	if (st->state_flags & PFSTATE_NOSYNC) {
2242 		if (st->sync_state != PFSYNC_S_NONE)
2243 			pfsync_q_del(st, true, b);
2244 		PFSYNC_BUCKET_UNLOCK(b);
2245 		return (full);
2246 	}
2247 
2248 	switch (st->sync_state) {
2249 	case PFSYNC_S_UPD_C:
2250 	case PFSYNC_S_IACK:
2251 		pfsync_q_del(st, false, b);
2252 		ref = false;
2253 		/* FALLTHROUGH */
2254 
2255 	case PFSYNC_S_NONE:
2256 		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
2257 		pfsync_push(b);
2258 		break;
2259 
2260 	case PFSYNC_S_INS:
2261 	case PFSYNC_S_UPD:
2262 	case PFSYNC_S_DEL_C:
2263 		/* we're already handling it */
2264 		break;
2265 
2266 	default:
2267 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2268 	}
2269 
2270 	if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union))
2271 		full = true;
2272 
2273 	PFSYNC_BUCKET_UNLOCK(b);
2274 
2275 	return (full);
2276 }
2277 
2278 static void
2279 pfsync_delete_state(struct pf_kstate *st)
2280 {
2281 	struct pfsync_softc *sc = V_pfsyncif;
2282 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2283 	bool ref = true;
2284 
2285 	PFSYNC_BUCKET_LOCK(b);
2286 	if (st->state_flags & PFSTATE_ACK)
2287 		pfsync_undefer_state_locked(st, 1);
2288 	if (st->state_flags & PFSTATE_NOSYNC) {
2289 		if (st->sync_state != PFSYNC_S_NONE)
2290 			pfsync_q_del(st, true, b);
2291 		PFSYNC_BUCKET_UNLOCK(b);
2292 		return;
2293 	}
2294 
2295 	if (b->b_len == PFSYNC_MINPKT)
2296 		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2297 
2298 	switch (st->sync_state) {
2299 	case PFSYNC_S_INS:
2300 		/* We never got to tell the world so just forget about it. */
2301 		pfsync_q_del(st, true, b);
2302 		break;
2303 
2304 	case PFSYNC_S_UPD_C:
2305 	case PFSYNC_S_UPD:
2306 	case PFSYNC_S_IACK:
2307 		pfsync_q_del(st, false, b);
2308 		ref = false;
2309 		/* FALLTHROUGH */
2310 
2311 	case PFSYNC_S_NONE:
2312 		pfsync_q_ins(st, PFSYNC_S_DEL_C, ref);
2313 		break;
2314 
2315 	default:
2316 		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2317 	}
2318 
2319 	PFSYNC_BUCKET_UNLOCK(b);
2320 }
2321 
2322 static void
2323 pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2324 {
2325 	struct {
2326 		struct pfsync_subheader subh;
2327 		struct pfsync_clr clr;
2328 	} __packed r;
2329 
2330 	bzero(&r, sizeof(r));
2331 
2332 	r.subh.action = PFSYNC_ACT_CLR;
2333 	r.subh.count = htons(1);
2334 	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;
2335 
2336 	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2337 	r.clr.creatorid = creatorid;
2338 
2339 	pfsync_send_plus(&r, sizeof(r));
2340 }
2341 
2342 static enum pfsync_q_id
2343 pfsync_sstate_to_qid(u_int8_t sync_state)
2344 {
2345 	struct pfsync_softc *sc = V_pfsyncif;
2346 
2347 	switch (sync_state) {
2348 		case PFSYNC_S_INS:
2349 			switch (sc->sc_version) {
2350 				case PFSYNC_MSG_VERSION_1301:
2351 					return PFSYNC_Q_INS_1301;
2352 				case PFSYNC_MSG_VERSION_1400:
2353 					return PFSYNC_Q_INS_1400;
2354 			}
2355 			break;
2356 		case PFSYNC_S_IACK:
2357 			return PFSYNC_Q_IACK;
2358 		case PFSYNC_S_UPD:
2359 			switch (sc->sc_version) {
2360 				case PFSYNC_MSG_VERSION_1301:
2361 					return PFSYNC_Q_UPD_1301;
2362 				case PFSYNC_MSG_VERSION_1400:
2363 					return PFSYNC_Q_UPD_1400;
2364 			}
2365 			break;
2366 		case PFSYNC_S_UPD_C:
2367 			return PFSYNC_Q_UPD_C;
2368 		case PFSYNC_S_DEL_C:
2369 			return PFSYNC_Q_DEL_C;
2370 		default:
2371 			panic("%s: Unsupported st->sync_state 0x%02x",
2372 			    __func__, sync_state);
2373 	}
2374 
2375 	panic("%s: Unsupported pfsync_msg_version %d",
2376 	    __func__, sc->sc_version);
2377 }
2378 
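/*
 * Place a state on the transmit queue matching its new sync state,
 * sending the bucket out first if adding it would exceed the interface
 * MTU. Takes a reference on the state when "ref" is true.
 */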
2379 static void
2380 pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref)
2381 {
2382 	enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state);
2383 	struct pfsync_softc *sc = V_pfsyncif;
2384 	size_t nlen = pfsync_qs[q].len;
2385 	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2386 
2387 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2388 
2389 	KASSERT(st->sync_state == PFSYNC_S_NONE,
2390 		("%s: st->sync_state %u", __func__, st->sync_state));
2391 	KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
2392 	    b->b_len));
2393 
2394 	if (TAILQ_EMPTY(&b->b_qs[q]))
2395 		nlen += sizeof(struct pfsync_subheader);
2396 
2397 	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2398 		pfsync_sendout(1, b->b_id);
2399 
2400 		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2401 	}
2402 
2403 	b->b_len += nlen;
2404 	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
2405 	st->sync_state = pfsync_qid_sstate[q];
2406 	if (ref)
2407 		pf_ref_state(st);
2408 }
2409 
2410 static void
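/*
 * Remove a state from whatever transmit queue it is on, shrink the
 * pending packet accordingly and drop the queue's reference when
 * "unref" is true.
 */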
2411 pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
2412 {
2413 	enum pfsync_q_id q;
2414 
2415 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2416 	KASSERT(st->sync_state != PFSYNC_S_NONE,
2417 		("%s: st->sync_state != PFSYNC_S_NONE", __func__));
2418 
2419 	q = pfsync_sstate_to_qid(st->sync_state);
2420 	b->b_len -= pfsync_qs[q].len;
2421 	TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
2422 	st->sync_state = PFSYNC_S_NONE;
2423 	if (unref)
2424 		pf_release_state(st);
2425 
2426 	if (TAILQ_EMPTY(&b->b_qs[q]))
2427 		b->b_len -= sizeof(struct pfsync_subheader);
2428 }
2429 
2430 static void
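/*
 * A peer requested a bulk update: reset the bulk walk cursor, announce
 * the start of the transfer and kick off pfsync_bulk_update().
 */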
2431 pfsync_bulk_start(void)
2432 {
2433 	struct pfsync_softc *sc = V_pfsyncif;
2434 
2435 	if (V_pf_status.debug >= PF_DEBUG_MISC)
2436 		printf("pfsync: received bulk update request\n");
2437 
2438 	PFSYNC_BLOCK(sc);
2439 
2440 	sc->sc_ureq_received = time_uptime;
2441 	sc->sc_bulk_hashid = 0;
2442 	sc->sc_bulk_stateid = 0;
2443 	pfsync_bulk_status(PFSYNC_BUS_START);
2444 	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
2445 	PFSYNC_BUNLOCK(sc);
2446 }
2447 
2448 static void
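/*
 * Walk the state table, queueing full updates for every state that has
 * not been refreshed since the bulk request arrived. Whenever a packet
 * fills up, remember the current position and reschedule ourselves.
 */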
2449 pfsync_bulk_update(void *arg)
2450 {
2451 	struct pfsync_softc *sc = arg;
2452 	struct pf_kstate *s;
2453 	int i;
2454 
2455 	PFSYNC_BLOCK_ASSERT(sc);
2456 	CURVNET_SET(sc->sc_ifp->if_vnet);
2457 
2458 	/*
2459 	 * Start with the last state from the previous invocation.
2460 	 * It may have gone away, in which case we start from its
2461 	 * hash slot.
2462 	 */
2463 	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
2464 
2465 	if (s != NULL)
2466 		i = PF_IDHASH(s);
2467 	else
2468 		i = sc->sc_bulk_hashid;
2469 
2470 	for (; i <= pf_hashmask; i++) {
2471 		struct pf_idhash *ih = &V_pf_idhash[i];
2472 
2473 		if (s != NULL)
2474 			PF_HASHROW_ASSERT(ih);
2475 		else {
2476 			PF_HASHROW_LOCK(ih);
2477 			s = LIST_FIRST(&ih->states);
2478 		}
2479 
2480 		for (; s; s = LIST_NEXT(s, entry)) {
2481 			if (s->sync_state == PFSYNC_S_NONE &&
2482 			    s->timeout < PFTM_MAX &&
2483 			    s->pfsync_time <= sc->sc_ureq_received) {
2484 				if (pfsync_update_state_req(s)) {
2485 					/* We've filled a packet. */
2486 					sc->sc_bulk_hashid = i;
2487 					sc->sc_bulk_stateid = s->id;
2488 					sc->sc_bulk_creatorid = s->creatorid;
2489 					PF_HASHROW_UNLOCK(ih);
2490 					callout_reset(&sc->sc_bulk_tmo, 1,
2491 					    pfsync_bulk_update, sc);
2492 					goto full;
2493 				}
2494 			}
2495 		}
2496 		PF_HASHROW_UNLOCK(ih);
2497 	}
2498 
2499 	/* We're done. */
2500 	pfsync_bulk_status(PFSYNC_BUS_END);
2501 full:
2502 	CURVNET_RESTORE();
2503 }
2504 
2505 static void
2506 pfsync_bulk_status(u_int8_t status)
2507 {
2508 	struct {
2509 		struct pfsync_subheader subh;
2510 		struct pfsync_bus bus;
2511 	} __packed r;
2512 
2513 	struct pfsync_softc *sc = V_pfsyncif;
2514 
2515 	bzero(&r, sizeof(r));
2516 
2517 	r.subh.action = PFSYNC_ACT_BUS;
2518 	r.subh.count = htons(1);
2519 	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;
2520 
2521 	r.bus.creatorid = V_pf_status.hostid;
2522 	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
2523 	r.bus.status = status;
2524 
2525 	pfsync_send_plus(&r, sizeof(r));
2526 }
2527 
2528 static void
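/*
 * The bulk update we requested did not complete in time. Retry up to
 * PFSYNC_MAX_BULKTRIES times, then pretend the transfer succeeded and
 * undo the CARP demotion.
 */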
2529 pfsync_bulk_fail(void *arg)
2530 {
2531 	struct pfsync_softc *sc = arg;
2532 	struct pfsync_bucket *b = &sc->sc_buckets[0];
2533 
2534 	CURVNET_SET(sc->sc_ifp->if_vnet);
2535 
2536 	PFSYNC_BLOCK_ASSERT(sc);
2537 
2538 	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
2539 		/* Try again */
2540 		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2541 		    pfsync_bulk_fail, V_pfsyncif);
2542 		PFSYNC_BUCKET_LOCK(b);
2543 		pfsync_request_update(0, 0);
2544 		PFSYNC_BUCKET_UNLOCK(b);
2545 	} else {
2546 		/* Pretend like the transfer was ok. */
2547 		sc->sc_ureq_sent = 0;
2548 		sc->sc_bulk_tries = 0;
2549 		PFSYNC_LOCK(sc);
2550 		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
2551 			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
2552 			    "pfsync bulk fail");
2553 		sc->sc_flags |= PFSYNCF_OK;
2554 		PFSYNC_UNLOCK(sc);
2555 		if (V_pf_status.debug >= PF_DEBUG_MISC)
2556 			printf("pfsync: failed to receive bulk update\n");
2557 	}
2558 
2559 	CURVNET_RESTORE();
2560 }
2561 
2562 static void
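/*
 * Append an out-of-band message (e.g. a clear-states or bulk status
 * announcement) to bucket 0's "plus" region and transmit it right away.
 */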
2563 pfsync_send_plus(void *plus, size_t pluslen)
2564 {
2565 	struct pfsync_softc *sc = V_pfsyncif;
2566 	struct pfsync_bucket *b = &sc->sc_buckets[0];
2567 	uint8_t *newplus;
2568 
2569 	PFSYNC_BUCKET_LOCK(b);
2570 
2571 	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
2572 		pfsync_sendout(1, b->b_id);
2573 
2574 	newplus = malloc(pluslen + b->b_pluslen, M_PFSYNC, M_NOWAIT);
2575 	if (newplus == NULL)
2576 		goto out;
2577 
2578 	if (b->b_plus != NULL) {
2579 		memcpy(newplus, b->b_plus, b->b_pluslen);
2580 		free(b->b_plus, M_PFSYNC);
2581 	} else {
2582 		MPASS(b->b_pluslen == 0);
2583 	}
2584 	memcpy(newplus + b->b_pluslen, plus, pluslen);
2585 
2586 	b->b_plus = newplus;
2587 	b->b_pluslen += pluslen;
2588 	b->b_len += pluslen;
2589 
2590 	pfsync_sendout(1, b->b_id);
2591 
2592 out:
2593 	PFSYNC_BUCKET_UNLOCK(b);
2594 }
2595 
2596 static void
2597 pfsync_timeout(void *arg)
2598 {
2599 	struct pfsync_bucket *b = arg;
2600 
2601 	CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
2602 	PFSYNC_BUCKET_LOCK(b);
2603 	pfsync_push(b);
2604 	PFSYNC_BUCKET_UNLOCK(b);
2605 	CURVNET_RESTORE();
2606 }
2607 
2608 static void
2609 pfsync_push(struct pfsync_bucket *b)
2610 {
2611 
2612 	PFSYNC_BUCKET_LOCK_ASSERT(b);
2613 
2614 	b->b_flags |= PFSYNCF_BUCKET_PUSH;
2615 	swi_sched(V_pfsync_swi_cookie, 0);
2616 }
2617 
2618 static void
2619 pfsync_push_all(struct pfsync_softc *sc)
2620 {
2621 	int c;
2622 	struct pfsync_bucket *b;
2623 
2624 	for (c = 0; c < pfsync_buckets; c++) {
2625 		b = &sc->sc_buckets[c];
2626 
2627 		PFSYNC_BUCKET_LOCK(b);
2628 		pfsync_push(b);
2629 		PFSYNC_BUCKET_UNLOCK(b);
2630 	}
2631 }
2632 
2633 static void
2634 pfsync_tx(struct pfsync_softc *sc, struct mbuf *m)
2635 {
2636 	struct ip *ip;
2637 	int af, error = 0;
2638 
2639 	ip = mtod(m, struct ip *);
2640 	MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4));
2641 
2642 	af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6;
2643 
2644 	/*
2645 	 * We distinguish between a deferral packet and our
2646 	 * own pfsync packet based on the M_SKIP_FIREWALL
2647 	 * flag. This is XXX.
2648 	 */
2649 	switch (af) {
2650 #ifdef INET
2651 	case AF_INET:
2652 		if (m->m_flags & M_SKIP_FIREWALL) {
2653 			error = ip_output(m, NULL, NULL, 0,
2654 			    NULL, NULL);
2655 		} else {
2656 			error = ip_output(m, NULL, NULL,
2657 			    IP_RAWOUTPUT, &sc->sc_imo, NULL);
2658 		}
2659 		break;
2660 #endif
2661 #ifdef INET6
2662 	case AF_INET6:
2663 		if (m->m_flags & M_SKIP_FIREWALL) {
2664 			error = ip6_output(m, NULL, NULL, 0,
2665 			    NULL, NULL, NULL);
2666 		} else {
2667 			error = ip6_output(m, NULL, NULL, 0,
2668 				&sc->sc_im6o, NULL, NULL);
2669 		}
2670 		break;
2671 #endif
2672 	}
2673 
2674 	if (error == 0)
2675 		V_pfsyncstats.pfsyncs_opackets++;
2676 	else
2677 		V_pfsyncstats.pfsyncs_oerrors++;
2678 
2679 }
2680 
2681 static void
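/*
 * Transmit software interrupt: flush buckets that were flagged for a
 * push and hand everything on the per-bucket send queues to pfsync_tx().
 */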
2682 pfsyncintr(void *arg)
2683 {
2684 	struct epoch_tracker et;
2685 	struct pfsync_softc *sc = arg;
2686 	struct pfsync_bucket *b;
2687 	struct mbuf *m, *n;
2688 	int c;
2689 
2690 	NET_EPOCH_ENTER(et);
2691 	CURVNET_SET(sc->sc_ifp->if_vnet);
2692 
2693 	for (c = 0; c < pfsync_buckets; c++) {
2694 		b = &sc->sc_buckets[c];
2695 
2696 		PFSYNC_BUCKET_LOCK(b);
2697 		if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
2698 			pfsync_sendout(0, b->b_id);
2699 			b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
2700 		}
2701 		_IF_DEQUEUE_ALL(&b->b_snd, m);
2702 		PFSYNC_BUCKET_UNLOCK(b);
2703 
2704 		for (; m != NULL; m = n) {
2705 			n = m->m_nextpkt;
2706 			m->m_nextpkt = NULL;
2707 
2708 			pfsync_tx(sc, m);
2709 		}
2710 	}
2711 	CURVNET_RESTORE();
2712 	NET_EPOCH_EXIT(et);
2713 }
2714 
2715 static int
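/*
 * Join the configured multicast sync peer group on the given interface
 * and initialize the IPv4/IPv6 multicast options used when transmitting
 * pfsync packets.
 */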
2716 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
2717     struct in_mfilter* imf, struct in6_mfilter* im6f)
2718 {
2719 #ifdef  INET
2720 	struct ip_moptions *imo = &sc->sc_imo;
2721 #endif
2722 #ifdef INET6
2723 	struct ip6_moptions *im6o = &sc->sc_im6o;
2724 	struct sockaddr_in6 *syncpeer_sa6 = NULL;
2725 #endif
2726 
2727 	if (!(ifp->if_flags & IFF_MULTICAST))
2728 		return (EADDRNOTAVAIL);
2729 
2730 	switch (sc->sc_sync_peer.ss_family) {
2731 #ifdef INET
2732 	case AF_INET:
2733 	{
2734 		int error;
2735 
2736 		ip_mfilter_init(&imo->imo_head);
2737 		imo->imo_multicast_vif = -1;
2738 		if ((error = in_joingroup(ifp,
2739 		    &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL,
2740 		    &imf->imf_inm)) != 0)
2741 			return (error);
2742 
2743 		ip_mfilter_insert(&imo->imo_head, imf);
2744 		imo->imo_multicast_ifp = ifp;
2745 		imo->imo_multicast_ttl = PFSYNC_DFLTTL;
2746 		imo->imo_multicast_loop = 0;
2747 		break;
2748 	}
2749 #endif
2750 #ifdef INET6
2751 	case AF_INET6:
2752 	{
2753 		int error;
2754 
2755 		syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer;
2756 		if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL)))
2757 			return (error);
2758 
2759 		ip6_mfilter_init(&im6o->im6o_head);
2760 		if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL,
2761 		    &(im6f->im6f_in6m), 0)) != 0)
2762 			return (error);
2763 
2764 		ip6_mfilter_insert(&im6o->im6o_head, im6f);
2765 		im6o->im6o_multicast_ifp = ifp;
2766 		im6o->im6o_multicast_hlim = PFSYNC_DFLTTL;
2767 		im6o->im6o_multicast_loop = 0;
2768 		break;
2769 	}
2770 #endif
2771 	}
2772 
2773 	return (0);
2774 }
2775 
2776 static void
2777 pfsync_multicast_cleanup(struct pfsync_softc *sc)
2778 {
2779 #ifdef INET
2780 	struct ip_moptions *imo = &sc->sc_imo;
2781 	struct in_mfilter *imf;
2782 
2783 	while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
2784 		ip_mfilter_remove(&imo->imo_head, imf);
2785 		in_leavegroup(imf->imf_inm, NULL);
2786 		ip_mfilter_free(imf);
2787 	}
2788 	imo->imo_multicast_ifp = NULL;
2789 #endif
2790 
2791 #ifdef INET6
2792 	struct ip6_moptions *im6o = &sc->sc_im6o;
2793 	struct in6_mfilter *im6f;
2794 
2795 	while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
2796 		ip6_mfilter_remove(&im6o->im6o_head, im6f);
2797 		in6_leavegroup(im6f->im6f_in6m, NULL);
2798 		ip6_mfilter_free(im6f);
2799 	}
2800 	im6o->im6o_multicast_ifp = NULL;
2801 #endif
2802 }
2803 
2804 void
2805 pfsync_detach_ifnet(struct ifnet *ifp)
2806 {
2807 	struct pfsync_softc *sc = V_pfsyncif;
2808 
2809 	if (sc == NULL)
2810 		return;
2811 
2812 	PFSYNC_LOCK(sc);
2813 
2814 	if (sc->sc_sync_if == ifp) {
2815 		/* We don't need multicast cleanup here, because the interface
2816 		 * is going away. We do need to ensure we don't try to do
2817 		 * cleanup later.
2818 		 */
2819 		ip_mfilter_init(&sc->sc_imo.imo_head);
2820 		sc->sc_imo.imo_multicast_ifp = NULL;
2821 		sc->sc_im6o.im6o_multicast_ifp = NULL;
2822 		sc->sc_sync_if = NULL;
2823 	}
2824 
2825 	PFSYNC_UNLOCK(sc);
2826 }
2827 
2828 static int
2829 pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status)
2830 {
2831 	struct sockaddr_storage sa;
2832 	status->maxupdates = pfsyncr->pfsyncr_maxupdates;
2833 	status->flags = pfsyncr->pfsyncr_defer;
2834 
2835 	strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ);
2836 
2837 	memset(&sa, 0, sizeof(sa));
2838 	if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) {
2839 		struct sockaddr_in *in = (struct sockaddr_in *)&sa;
2840 		in->sin_family = AF_INET;
2841 		in->sin_len = sizeof(*in);
2842 		in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr;
2843 	}
2844 	status->syncpeer = sa;
2845 
2846 	return 0;
2847 }
2848 
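/*
 * Apply a new pfsync configuration to the softc: validate it, canonicalize
 * the sync peer (defaulting to the pfsync multicast group), select the
 * message version, switch the sync interface, rebuild the IP header
 * template and multicast memberships, and request a bulk update from the
 * new peer.
 */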
2849 static int
2850 pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
2851 {
2852 	struct ifnet *sifp;
2853 	struct in_mfilter *imf = NULL;
2854 	struct in6_mfilter *im6f = NULL;
2855 	int error;
2856 	int c;
2857 
2858 	if ((status->maxupdates < 0) || (status->maxupdates > 255))
2859 		return (EINVAL);
2860 
2861 	if (status->syncdev[0] == '\0')
2862 		sifp = NULL;
2863 	else if ((sifp = ifunit_ref(status->syncdev)) == NULL)
2864 		return (EINVAL);
2865 
2866 	switch (status->syncpeer.ss_family) {
2867 #ifdef INET
2868 	case AF_UNSPEC:
2869 	case AF_INET: {
2870 		struct sockaddr_in *status_sin;
2871 		status_sin = (struct sockaddr_in *)&(status->syncpeer);
2872 		if (sifp != NULL) {
2873 			if (status_sin->sin_addr.s_addr == 0 ||
2874 			    status_sin->sin_addr.s_addr ==
2875 			    htonl(INADDR_PFSYNC_GROUP)) {
2876 				status_sin->sin_family = AF_INET;
2877 				status_sin->sin_len = sizeof(*status_sin);
2878 				status_sin->sin_addr.s_addr =
2879 				    htonl(INADDR_PFSYNC_GROUP);
2880 			}
2881 
2882 			if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) {
2883 				imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
2884 			}
2885 		}
2886 		break;
2887 	}
2888 #endif
2889 #ifdef INET6
2890 	case AF_INET6: {
2891 		struct sockaddr_in6 *status_sin6;
2892 		status_sin6 = (struct sockaddr_in6*)&(status->syncpeer);
2893 		if (sifp != NULL) {
2894 			if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) ||
2895 			    IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr,
2896 				&in6addr_linklocal_pfsync_group)) {
2897 				status_sin6->sin6_family = AF_INET6;
2898 				status_sin6->sin6_len = sizeof(*status_sin6);
2899 				status_sin6->sin6_addr =
2900 				    in6addr_linklocal_pfsync_group;
2901 			}
2902 
2903 			if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) {
2904 				im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0);
2905 			}
2906 		}
2907 		break;
2908 	}
2909 #endif
2910 	}
2911 
2912 	PFSYNC_LOCK(sc);
2913 
2914 	switch (status->version) {
2915 		case PFSYNC_MSG_VERSION_UNSPECIFIED:
2916 			sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
2917 			break;
2918 		case PFSYNC_MSG_VERSION_1301:
2919 		case PFSYNC_MSG_VERSION_1400:
2920 			sc->sc_version = status->version;
2921 			break;
2922 		default:
2923 			PFSYNC_UNLOCK(sc);
2924 			return (EINVAL);
2925 	}
2926 
2927 	switch (status->syncpeer.ss_family) {
2928 	case AF_INET: {
2929 		struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer);
2930 		struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer;
2931 		sc_sin->sin_family = AF_INET;
2932 		sc_sin->sin_len = sizeof(*sc_sin);
2933 		if (status_sin->sin_addr.s_addr == 0) {
2934 			sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
2935 		} else {
2936 			sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr;
2937 		}
2938 		break;
2939 	}
2940 	case AF_INET6: {
2941 		struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer);
2942 		struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer;
2943 		sc_sin->sin6_family = AF_INET6;
2944 		sc_sin->sin6_len = sizeof(*sc_sin);
2945 		if (IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) {
2946 			sc_sin->sin6_addr = in6addr_linklocal_pfsync_group;
2947 		} else {
2948 			sc_sin->sin6_addr = status_sin->sin6_addr;
2949 		}
2950 		break;
2951 	}
2952 	}
2953 
2954 	sc->sc_maxupdates = status->maxupdates;
2955 	if (status->flags & PFSYNCF_DEFER) {
2956 		sc->sc_flags |= PFSYNCF_DEFER;
2957 		V_pfsync_defer_ptr = pfsync_defer;
2958 	} else {
2959 		sc->sc_flags &= ~PFSYNCF_DEFER;
2960 		V_pfsync_defer_ptr = NULL;
2961 	}
2962 
2963 	if (sifp == NULL) {
2964 		if (sc->sc_sync_if)
2965 			if_rele(sc->sc_sync_if);
2966 		sc->sc_sync_if = NULL;
2967 		pfsync_multicast_cleanup(sc);
2968 		PFSYNC_UNLOCK(sc);
2969 		return (0);
2970 	}
2971 
2972 	for (c = 0; c < pfsync_buckets; c++) {
2973 		PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
2974 		if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
2975 		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
2976 			(sc->sc_sync_if != NULL &&
2977 			    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
2978 			sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
2979 			pfsync_sendout(1, c);
2980 		PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
2981 	}
2982 
2983 	pfsync_multicast_cleanup(sc);
2984 
2985 	if (((sc->sc_sync_peer.ss_family == AF_INET) &&
2986 	    IN_MULTICAST(ntohl(((struct sockaddr_in *)
2987 	        &sc->sc_sync_peer)->sin_addr.s_addr))) ||
2988 	    ((sc->sc_sync_peer.ss_family == AF_INET6) &&
2989 	    IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6*)
2990 	        &sc->sc_sync_peer)->sin6_addr))) {
2991 		error = pfsync_multicast_setup(sc, sifp, imf, im6f);
2992 		if (error) {
2993 			if_rele(sifp);
2994 			PFSYNC_UNLOCK(sc);
2995 #ifdef INET
2996 			if (imf != NULL)
2997 				ip_mfilter_free(imf);
2998 #endif
2999 #ifdef INET6
3000 			if (im6f != NULL)
3001 				ip6_mfilter_free(im6f);
3002 #endif
3003 			return (error);
3004 		}
3005 	}
3006 	if (sc->sc_sync_if)
3007 		if_rele(sc->sc_sync_if);
3008 	sc->sc_sync_if = sifp;
3009 
3010 	switch (sc->sc_sync_peer.ss_family) {
3011 #ifdef INET
3012 	case AF_INET: {
3013 		struct ip *ip;
3014 		ip = &sc->sc_template.ipv4;
3015 		bzero(ip, sizeof(*ip));
3016 		ip->ip_v = IPVERSION;
3017 		ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2;
3018 		ip->ip_tos = IPTOS_LOWDELAY;
3019 		/* len and id are set later. */
3020 		ip->ip_off = htons(IP_DF);
3021 		ip->ip_ttl = PFSYNC_DFLTTL;
3022 		ip->ip_p = IPPROTO_PFSYNC;
3023 		ip->ip_src.s_addr = INADDR_ANY;
3024 		ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
3025 		break;
3026 	}
3027 #endif
3028 #ifdef INET6
3029 	case AF_INET6: {
3030 		struct ip6_hdr *ip6;
3031 		ip6 = &sc->sc_template.ipv6;
3032 		bzero(ip6, sizeof(*ip6));
3033 		ip6->ip6_vfc = IPV6_VERSION;
3034 		ip6->ip6_hlim = PFSYNC_DFLTTL;
3035 		ip6->ip6_nxt = IPPROTO_PFSYNC;
3036 		ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr;
3037 
3038 		struct epoch_tracker et;
3039 		NET_EPOCH_ENTER(et);
3040 		in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0,
3041 		    sc->sc_sync_if, &ip6->ip6_src, NULL);
3042 		NET_EPOCH_EXIT(et);
3043 		break;
3044 	}
3045 #endif
3046 	}
3047 
3048 	/* Request a full state table update. */
3049 	if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
3050 		(*carp_demote_adj_p)(V_pfsync_carp_adj,
3051 		    "pfsync bulk start");
3052 	sc->sc_flags &= ~PFSYNCF_OK;
3053 	if (V_pf_status.debug >= PF_DEBUG_MISC)
3054 		printf("pfsync: requesting bulk update\n");
3055 	PFSYNC_UNLOCK(sc);
3056 	PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
3057 	pfsync_request_update(0, 0);
3058 	PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
3059 	PFSYNC_BLOCK(sc);
3060 	sc->sc_ureq_sent = time_uptime;
3061 	callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc);
3062 	PFSYNC_BUNLOCK(sc);
3063 	return (0);
3064 }
3065 
3066 static void
3067 pfsync_pointers_init(void)
3068 {
3069 
3070 	PF_RULES_WLOCK();
3071 	V_pfsync_state_import_ptr = pfsync_state_import;
3072 	V_pfsync_insert_state_ptr = pfsync_insert_state;
3073 	V_pfsync_update_state_ptr = pfsync_update_state;
3074 	V_pfsync_delete_state_ptr = pfsync_delete_state;
3075 	V_pfsync_clear_states_ptr = pfsync_clear_states;
3076 	V_pfsync_defer_ptr = pfsync_defer;
3077 	PF_RULES_WUNLOCK();
3078 }
3079 
3080 static void
3081 pfsync_pointers_uninit(void)
3082 {
3083 
3084 	PF_RULES_WLOCK();
3085 	V_pfsync_state_import_ptr = NULL;
3086 	V_pfsync_insert_state_ptr = NULL;
3087 	V_pfsync_update_state_ptr = NULL;
3088 	V_pfsync_delete_state_ptr = NULL;
3089 	V_pfsync_clear_states_ptr = NULL;
3090 	V_pfsync_defer_ptr = NULL;
3091 	PF_RULES_WUNLOCK();
3092 }
3093 
3094 static void
3095 vnet_pfsync_init(const void *unused __unused)
3096 {
3097 	int error;
3098 
3099 	V_pfsync_cloner = if_clone_simple(pfsyncname,
3100 	    pfsync_clone_create, pfsync_clone_destroy, 1);
3101 	error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
3102 	    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
3103 	if (error) {
3104 		if_clone_detach(V_pfsync_cloner);
3105 		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
3106 	}
3107 
3108 	pfsync_pointers_init();
3109 }
3110 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
3111     vnet_pfsync_init, NULL);
3112 
3113 static void
3114 vnet_pfsync_uninit(const void *unused __unused)
3115 {
3116 	int ret __diagused;
3117 
3118 	pfsync_pointers_uninit();
3119 
3120 	if_clone_detach(V_pfsync_cloner);
3121 	ret = swi_remove(V_pfsync_swi_cookie);
3122 	MPASS(ret == 0);
3123 	ret = intr_event_destroy(V_pfsync_swi_ie);
3124 	MPASS(ret == 0);
3125 }
3126 
3127 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
3128     vnet_pfsync_uninit, NULL);
3129 
3130 static int
3131 pfsync_init(void)
3132 {
3133 	int error;
3134 
3135 	pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;
3136 
3137 #ifdef INET
3138 	error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
3139 	if (error)
3140 		return (error);
3141 #endif
3142 #ifdef INET6
3143 	error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL);
3144 	if (error) {
3145 		ipproto_unregister(IPPROTO_PFSYNC);
3146 		return (error);
3147 	}
3148 #endif
3149 
3150 	return (0);
3151 }
3152 
3153 static void
3154 pfsync_uninit(void)
3155 {
3156 	pfsync_detach_ifnet_ptr = NULL;
3157 
3158 #ifdef INET
3159 	ipproto_unregister(IPPROTO_PFSYNC);
3160 #endif
3161 #ifdef INET6
3162 	ip6proto_unregister(IPPROTO_PFSYNC);
3163 #endif
3164 }
3165 
3166 static int
3167 pfsync_modevent(module_t mod, int type, void *data)
3168 {
3169 	int error = 0;
3170 
3171 	switch (type) {
3172 	case MOD_LOAD:
3173 		error = pfsync_init();
3174 		break;
3175 	case MOD_UNLOAD:
3176 		pfsync_uninit();
3177 		break;
3178 	default:
3179 		error = EINVAL;
3180 		break;
3181 	}
3182 
3183 	return (error);
3184 }
3185 
3186 static moduledata_t pfsync_mod = {
3187 	pfsyncname,
3188 	pfsync_modevent,
3189 	0
3190 };
3191 
3192 #define PFSYNC_MODVER 1
3193 
3194 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
3195 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
3196 MODULE_VERSION(pfsync, PFSYNC_MODVER);
3197 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3198