/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND ISC)
 *
 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.128 - cleanups
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) )

struct pfsync_bucket;

static int	pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
		    struct pfsync_state_peer *);
static int	pfsync_in_clr(struct mbuf *, int, int, int);
static int	pfsync_in_ins(struct mbuf *, int, int, int);
static int	pfsync_in_iack(struct mbuf *, int, int, int);
static int	pfsync_in_upd(struct mbuf *, int, int, int);
static int	pfsync_in_upd_c(struct mbuf *, int, int, int);
static int	pfsync_in_ureq(struct mbuf *, int, int, int);
static int	pfsync_in_del(struct mbuf *, int, int, int);
static int	pfsync_in_del_c(struct mbuf *, int, int, int);
static int	pfsync_in_bus(struct mbuf *, int, int, int);
static int	pfsync_in_tdb(struct mbuf *, int, int, int);
static int	pfsync_in_eof(struct mbuf *, int, int, int);
static int	pfsync_in_error(struct mbuf *, int, int, int);

static int (*pfsync_acts[])(struct mbuf *, int, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
};

struct pfsync_q {
	void		(*write)(struct pf_kstate *, void *);
	size_t		len;
	u_int8_t	action;
};

/* we have one of these for every PFSYNC_S_ */
static void	pfsync_out_state(struct pf_kstate *, void *);
static void	pfsync_out_iack(struct pf_kstate *, void *);
static void	pfsync_out_upd_c(struct pf_kstate *, void *);
static void	pfsync_out_del(struct pf_kstate *, void *);

static struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};

static void	pfsync_q_ins(struct pf_kstate *, int, bool);
static void	pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);

static void	pfsync_update_state(struct pf_kstate *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};

struct pfsync_deferral {
	struct pfsync_softc		*pd_sc;
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	u_int				pd_refs;
	struct callout			pd_tmo;

	struct pf_kstate		*pd_st;
	struct mbuf			*pd_m;
};

struct pfsync_softc;

struct pfsync_bucket
{
	int			b_id;
	struct pfsync_softc	*b_sc;
	struct mtx		b_mtx;
	struct callout		b_tmo;
	int			b_flags;
#define	PFSYNCF_BUCKET_PUSH	0x00000001

	size_t			b_len;
	TAILQ_HEAD(, pf_kstate)			b_qs[PFSYNC_S_COUNT];
	TAILQ_HEAD(, pfsync_upd_req_item)	b_upd_req_list;
	TAILQ_HEAD(, pfsync_deferral)		b_deferrals;
	u_int			b_deferred;
	void			*b_plus;
	size_t			b_pluslen;

	struct ifaltq		b_snd;
};

struct pfsync_softc {
	/* Configuration */
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	sc_imo;
	struct in_addr		sc_sync_peer;
	uint32_t		sc_flags;
	uint8_t			sc_maxupdates;
	struct ip		sc_template;
	struct mtx		sc_mtx;

	/* Queued data */
	struct pfsync_bucket	*sc_buckets;

	/* Bulk update info */
	struct mtx		sc_bulk_mtx;
	uint32_t		sc_ureq_sent;
	int			sc_bulk_tries;
	uint32_t		sc_ureq_received;
	int			sc_bulk_hashid;
	uint64_t		sc_bulk_stateid;
	uint32_t		sc_bulk_creatorid;
	struct callout		sc_bulk_tmo;
	struct callout		sc_bulkfail_tmo;
};

#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define	PFSYNC_BUCKET_LOCK(b)		mtx_lock(&(b)->b_mtx)
#define	PFSYNC_BUCKET_UNLOCK(b)		mtx_unlock(&(b)->b_mtx)
#define	PFSYNC_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->b_mtx, MA_OWNED)

#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
#define	V_pfsync_swi_ie		VNET(pfsync_swi_ie)
VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)

static void	pfsync_timeout(void *);
static void	pfsync_push(struct pfsync_bucket *);
static void	pfsync_push_all(struct pfsync_softc *);
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
		    struct in_mfilter *imf);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static void	pfsync_pointers_init(void);
static void	pfsync_pointers_uninit(void);
static int	pfsync_init(void);
static void	pfsync_uninit(void);

static unsigned long pfsync_buckets;

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
    &pfsync_buckets, 0, "Number of pfsync hash buckets");

static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
static void	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
		    struct pf_state_peer *);
static int	pfsyncoutput(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int	pfsync_defer(struct pf_kstate *, struct mbuf *);
static void	pfsync_undefer(struct pfsync_deferral *, int);
static void	pfsync_undefer_state(struct pf_kstate *, int);
static void	pfsync_defer_tmo(void *);

static void	pfsync_request_update(u_int32_t, u_int64_t);
static bool	pfsync_update_state_req(struct pf_kstate *);

static void	pfsync_drop(struct pfsync_softc *);
static void	pfsync_sendout(int, int);
static void	pfsync_send_plus(void *, size_t);

static void	pfsync_bulk_start(void);
static void	pfsync_bulk_status(u_int8_t);
static void	pfsync_bulk_update(void *);
static void	pfsync_bulk_fail(void *);

static void	pfsync_detach_ifnet(struct ifnet *);
#ifdef IPSEC
static void	pfsync_update_net_tdb(struct pfsync_tdb *);
#endif
static struct pfsync_bucket	*pfsync_get_bucket(struct pfsync_softc *,
		    struct pf_kstate *);

#define PFSYNC_MAX_BULKTRIES	12
#define	PFSYNC_DEFER_TIMEOUT	((20 * hz) / 1000)

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define	V_pfsync_cloner	VNET(pfsync_cloner)

static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	struct pfsync_bucket *b;
	int c, q;

	if (unit != 0)
		return (EINVAL);

	if (!pfsync_buckets)
		pfsync_buckets = mp_ncpus * 2;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->sc_flags |= PFSYNCF_OK;
	sc->sc_maxupdates = 128;

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if (ifp == NULL) {
		free(sc, M_PFSYNC);
		return (ENOSPC);
	}
	if_initname(ifp, pfsyncname, unit);
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

	if_attach(ifp);

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
	    M_PFSYNC, M_ZERO | M_WAITOK);
	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);

		b->b_id = c;
		b->b_sc = sc;
		b->b_len = PFSYNC_MINPKT;

		for (q = 0; q < PFSYNC_S_COUNT; q++)
			TAILQ_INIT(&b->b_qs[q]);

		TAILQ_INIT(&b->b_upd_req_list);
		TAILQ_INIT(&b->b_deferrals);

		callout_init(&b->b_tmo, 1);

		b->b_snd.ifq_maxlen = ifqmaxlen;
	}

	V_pfsyncif = sc;

	return (0);
}

static void
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct pfsync_bucket *b;
	int c;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		/*
		 * At this stage, everything should have already been
		 * cleared by pfsync_uninit(), and we have only to
		 * drain callouts.
395 */ 396 while (b->b_deferred > 0) { 397 struct pfsync_deferral *pd = 398 TAILQ_FIRST(&b->b_deferrals); 399 400 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); 401 b->b_deferred--; 402 if (callout_stop(&pd->pd_tmo) > 0) { 403 pf_release_state(pd->pd_st); 404 m_freem(pd->pd_m); 405 free(pd, M_PFSYNC); 406 } else { 407 pd->pd_refs++; 408 callout_drain(&pd->pd_tmo); 409 free(pd, M_PFSYNC); 410 } 411 } 412 413 callout_drain(&b->b_tmo); 414 } 415 416 callout_drain(&sc->sc_bulkfail_tmo); 417 callout_drain(&sc->sc_bulk_tmo); 418 419 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 420 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); 421 bpfdetach(ifp); 422 if_detach(ifp); 423 424 pfsync_drop(sc); 425 426 if_free(ifp); 427 pfsync_multicast_cleanup(sc); 428 mtx_destroy(&sc->sc_mtx); 429 mtx_destroy(&sc->sc_bulk_mtx); 430 431 free(sc->sc_buckets, M_PFSYNC); 432 free(sc, M_PFSYNC); 433 434 V_pfsyncif = NULL; 435 } 436 437 static int 438 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 439 struct pf_state_peer *d) 440 { 441 if (s->scrub.scrub_flag && d->scrub == NULL) { 442 d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); 443 if (d->scrub == NULL) 444 return (ENOMEM); 445 } 446 447 return (0); 448 } 449 450 static int 451 pfsync_state_import(struct pfsync_state *sp, int flags) 452 { 453 struct pfsync_softc *sc = V_pfsyncif; 454 #ifndef __NO_STRICT_ALIGNMENT 455 struct pfsync_state_key key[2]; 456 #endif 457 struct pfsync_state_key *kw, *ks; 458 struct pf_kstate *st = NULL; 459 struct pf_state_key *skw = NULL, *sks = NULL; 460 struct pf_krule *r = NULL; 461 struct pfi_kkif *kif; 462 int error; 463 464 PF_RULES_RASSERT(); 465 466 if (sp->creatorid == 0) { 467 if (V_pf_status.debug >= PF_DEBUG_MISC) 468 printf("%s: invalid creator id: %08x\n", __func__, 469 ntohl(sp->creatorid)); 470 return (EINVAL); 471 } 472 473 if ((kif = pfi_kkif_find(sp->ifname)) == NULL) { 474 if (V_pf_status.debug >= PF_DEBUG_MISC) 475 printf("%s: unknown interface: %s\n", __func__, 476 sp->ifname); 477 if (flags & PFSYNC_SI_IOCTL) 478 return (EINVAL); 479 return (0); /* skip this state */ 480 } 481 482 /* 483 * If the ruleset checksums match or the state is coming from the ioctl, 484 * it's safe to associate the state with the rule of that number. 485 */ 486 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 487 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 488 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 489 r = pf_main_ruleset.rules[ 490 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 491 else 492 r = &V_pf_default_rule; 493 494 if ((r->max_states && 495 counter_u64_fetch(r->states_cur) >= r->max_states)) 496 goto cleanup; 497 498 /* 499 * XXXGL: consider M_WAITOK in ioctl path after. 
500 */ 501 st = pf_alloc_state(M_NOWAIT); 502 if (__predict_false(st == NULL)) 503 goto cleanup; 504 505 if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL) 506 goto cleanup; 507 508 #ifndef __NO_STRICT_ALIGNMENT 509 bcopy(&sp->key, key, sizeof(struct pfsync_state_key) * 2); 510 kw = &key[PF_SK_WIRE]; 511 ks = &key[PF_SK_STACK]; 512 #else 513 kw = &sp->key[PF_SK_WIRE]; 514 ks = &sp->key[PF_SK_STACK]; 515 #endif 516 517 if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->af) || 518 PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->af) || 519 kw->port[0] != ks->port[0] || 520 kw->port[1] != ks->port[1]) { 521 sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 522 if (sks == NULL) 523 goto cleanup; 524 } else 525 sks = skw; 526 527 /* allocate memory for scrub info */ 528 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 529 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 530 goto cleanup; 531 532 /* Copy to state key(s). */ 533 skw->addr[0] = kw->addr[0]; 534 skw->addr[1] = kw->addr[1]; 535 skw->port[0] = kw->port[0]; 536 skw->port[1] = kw->port[1]; 537 skw->proto = sp->proto; 538 skw->af = sp->af; 539 if (sks != skw) { 540 sks->addr[0] = ks->addr[0]; 541 sks->addr[1] = ks->addr[1]; 542 sks->port[0] = ks->port[0]; 543 sks->port[1] = ks->port[1]; 544 sks->proto = sp->proto; 545 sks->af = sp->af; 546 } 547 548 /* copy to state */ 549 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 550 st->creation = time_uptime - ntohl(sp->creation); 551 st->expire = time_uptime; 552 if (sp->expire) { 553 uint32_t timeout; 554 555 timeout = r->timeout[sp->timeout]; 556 if (!timeout) 557 timeout = V_pf_default_rule.timeout[sp->timeout]; 558 559 /* sp->expire may have been adaptively scaled by export. */ 560 st->expire -= timeout - ntohl(sp->expire); 561 } 562 563 st->direction = sp->direction; 564 st->log = sp->log; 565 st->timeout = sp->timeout; 566 st->state_flags = sp->state_flags; 567 568 st->id = sp->id; 569 st->creatorid = sp->creatorid; 570 pf_state_peer_ntoh(&sp->src, &st->src); 571 pf_state_peer_ntoh(&sp->dst, &st->dst); 572 573 st->rule.ptr = r; 574 st->nat_rule.ptr = NULL; 575 st->anchor.ptr = NULL; 576 st->rt_kif = NULL; 577 578 st->pfsync_time = time_uptime; 579 st->sync_state = PFSYNC_S_NONE; 580 581 if (!(flags & PFSYNC_SI_IOCTL)) 582 st->state_flags |= PFSTATE_NOSYNC; 583 584 if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0) 585 goto cleanup_state; 586 587 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 588 counter_u64_add(r->states_cur, 1); 589 counter_u64_add(r->states_tot, 1); 590 591 if (!(flags & PFSYNC_SI_IOCTL)) { 592 st->state_flags &= ~PFSTATE_NOSYNC; 593 if (st->state_flags & PFSTATE_ACK) { 594 pfsync_q_ins(st, PFSYNC_S_IACK, true); 595 pfsync_push_all(sc); 596 } 597 } 598 st->state_flags &= ~PFSTATE_ACK; 599 PF_STATE_UNLOCK(st); 600 601 return (0); 602 603 cleanup: 604 error = ENOMEM; 605 if (skw == sks) 606 sks = NULL; 607 if (skw != NULL) 608 uma_zfree(V_pf_state_key_z, skw); 609 if (sks != NULL) 610 uma_zfree(V_pf_state_key_z, sks); 611 612 cleanup_state: /* pf_state_insert() frees the state keys. 
*/ 613 if (st) { 614 pf_free_state(st); 615 } 616 return (error); 617 } 618 619 static int 620 pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused) 621 { 622 struct pfsync_softc *sc = V_pfsyncif; 623 struct mbuf *m = *mp; 624 struct ip *ip = mtod(m, struct ip *); 625 struct pfsync_header *ph; 626 struct pfsync_subheader subh; 627 628 int offset, len, flags = 0; 629 int rv; 630 uint16_t count; 631 632 PF_RULES_RLOCK_TRACKER; 633 634 *mp = NULL; 635 V_pfsyncstats.pfsyncs_ipackets++; 636 637 /* Verify that we have a sync interface configured. */ 638 if (!sc || !sc->sc_sync_if || !V_pf_status.running || 639 (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 640 goto done; 641 642 /* verify that the packet came in on the right interface */ 643 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 644 V_pfsyncstats.pfsyncs_badif++; 645 goto done; 646 } 647 648 if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 649 if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 650 /* verify that the IP TTL is 255. */ 651 if (ip->ip_ttl != PFSYNC_DFLTTL) { 652 V_pfsyncstats.pfsyncs_badttl++; 653 goto done; 654 } 655 656 offset = ip->ip_hl << 2; 657 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 658 V_pfsyncstats.pfsyncs_hdrops++; 659 goto done; 660 } 661 662 if (offset + sizeof(*ph) > m->m_len) { 663 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 664 V_pfsyncstats.pfsyncs_hdrops++; 665 return (IPPROTO_DONE); 666 } 667 ip = mtod(m, struct ip *); 668 } 669 ph = (struct pfsync_header *)((char *)ip + offset); 670 671 /* verify the version */ 672 if (ph->version != PFSYNC_VERSION) { 673 V_pfsyncstats.pfsyncs_badver++; 674 goto done; 675 } 676 677 len = ntohs(ph->len) + offset; 678 if (m->m_pkthdr.len < len) { 679 V_pfsyncstats.pfsyncs_badlen++; 680 goto done; 681 } 682 683 /* 684 * Trusting pf_chksum during packet processing, as well as seeking 685 * in interface name tree, require holding PF_RULES_RLOCK(). 
686 */ 687 PF_RULES_RLOCK(); 688 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 689 flags = PFSYNC_SI_CKSUM; 690 691 offset += sizeof(*ph); 692 while (offset <= len - sizeof(subh)) { 693 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 694 offset += sizeof(subh); 695 696 if (subh.action >= PFSYNC_ACT_MAX) { 697 V_pfsyncstats.pfsyncs_badact++; 698 PF_RULES_RUNLOCK(); 699 goto done; 700 } 701 702 count = ntohs(subh.count); 703 V_pfsyncstats.pfsyncs_iacts[subh.action] += count; 704 rv = (*pfsync_acts[subh.action])(m, offset, count, flags); 705 if (rv == -1) { 706 PF_RULES_RUNLOCK(); 707 return (IPPROTO_DONE); 708 } 709 710 offset += rv; 711 } 712 PF_RULES_RUNLOCK(); 713 714 done: 715 m_freem(m); 716 return (IPPROTO_DONE); 717 } 718 719 static int 720 pfsync_in_clr(struct mbuf *m, int offset, int count, int flags) 721 { 722 struct pfsync_clr *clr; 723 struct mbuf *mp; 724 int len = sizeof(*clr) * count; 725 int i, offp; 726 u_int32_t creatorid; 727 728 mp = m_pulldown(m, offset, len, &offp); 729 if (mp == NULL) { 730 V_pfsyncstats.pfsyncs_badlen++; 731 return (-1); 732 } 733 clr = (struct pfsync_clr *)(mp->m_data + offp); 734 735 for (i = 0; i < count; i++) { 736 creatorid = clr[i].creatorid; 737 738 if (clr[i].ifname[0] != '\0' && 739 pfi_kkif_find(clr[i].ifname) == NULL) 740 continue; 741 742 for (int i = 0; i <= pf_hashmask; i++) { 743 struct pf_idhash *ih = &V_pf_idhash[i]; 744 struct pf_kstate *s; 745 relock: 746 PF_HASHROW_LOCK(ih); 747 LIST_FOREACH(s, &ih->states, entry) { 748 if (s->creatorid == creatorid) { 749 s->state_flags |= PFSTATE_NOSYNC; 750 pf_unlink_state(s); 751 goto relock; 752 } 753 } 754 PF_HASHROW_UNLOCK(ih); 755 } 756 } 757 758 return (len); 759 } 760 761 static int 762 pfsync_in_ins(struct mbuf *m, int offset, int count, int flags) 763 { 764 struct mbuf *mp; 765 struct pfsync_state *sa, *sp; 766 int len = sizeof(*sp) * count; 767 int i, offp; 768 769 mp = m_pulldown(m, offset, len, &offp); 770 if (mp == NULL) { 771 V_pfsyncstats.pfsyncs_badlen++; 772 return (-1); 773 } 774 sa = (struct pfsync_state *)(mp->m_data + offp); 775 776 for (i = 0; i < count; i++) { 777 sp = &sa[i]; 778 779 /* Check for invalid values. */ 780 if (sp->timeout >= PFTM_MAX || 781 sp->src.state > PF_TCPS_PROXY_DST || 782 sp->dst.state > PF_TCPS_PROXY_DST || 783 sp->direction > PF_OUT || 784 (sp->af != AF_INET && sp->af != AF_INET6)) { 785 if (V_pf_status.debug >= PF_DEBUG_MISC) 786 printf("%s: invalid value\n", __func__); 787 V_pfsyncstats.pfsyncs_badval++; 788 continue; 789 } 790 791 if (pfsync_state_import(sp, flags) == ENOMEM) 792 /* Drop out, but process the rest of the actions. */ 793 break; 794 } 795 796 return (len); 797 } 798 799 static int 800 pfsync_in_iack(struct mbuf *m, int offset, int count, int flags) 801 { 802 struct pfsync_ins_ack *ia, *iaa; 803 struct pf_kstate *st; 804 805 struct mbuf *mp; 806 int len = count * sizeof(*ia); 807 int offp, i; 808 809 mp = m_pulldown(m, offset, len, &offp); 810 if (mp == NULL) { 811 V_pfsyncstats.pfsyncs_badlen++; 812 return (-1); 813 } 814 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 815 816 for (i = 0; i < count; i++) { 817 ia = &iaa[i]; 818 819 st = pf_find_state_byid(ia->id, ia->creatorid); 820 if (st == NULL) 821 continue; 822 823 if (st->state_flags & PFSTATE_ACK) { 824 pfsync_undefer_state(st, 0); 825 } 826 PF_STATE_UNLOCK(st); 827 } 828 /* 829 * XXX this is not yet implemented, but we know the size of the 830 * message so we can skip it. 
831 */ 832 833 return (count * sizeof(struct pfsync_ins_ack)); 834 } 835 836 static int 837 pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src, 838 struct pfsync_state_peer *dst) 839 { 840 int sync = 0; 841 842 PF_STATE_LOCK_ASSERT(st); 843 844 /* 845 * The state should never go backwards except 846 * for syn-proxy states. Neither should the 847 * sequence window slide backwards. 848 */ 849 if ((st->src.state > src->state && 850 (st->src.state < PF_TCPS_PROXY_SRC || 851 src->state >= PF_TCPS_PROXY_SRC)) || 852 853 (st->src.state == src->state && 854 SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))) 855 sync++; 856 else 857 pf_state_peer_ntoh(src, &st->src); 858 859 if ((st->dst.state > dst->state) || 860 861 (st->dst.state >= TCPS_SYN_SENT && 862 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))) 863 sync++; 864 else 865 pf_state_peer_ntoh(dst, &st->dst); 866 867 return (sync); 868 } 869 870 static int 871 pfsync_in_upd(struct mbuf *m, int offset, int count, int flags) 872 { 873 struct pfsync_softc *sc = V_pfsyncif; 874 struct pfsync_state *sa, *sp; 875 struct pf_kstate *st; 876 int sync; 877 878 struct mbuf *mp; 879 int len = count * sizeof(*sp); 880 int offp, i; 881 882 mp = m_pulldown(m, offset, len, &offp); 883 if (mp == NULL) { 884 V_pfsyncstats.pfsyncs_badlen++; 885 return (-1); 886 } 887 sa = (struct pfsync_state *)(mp->m_data + offp); 888 889 for (i = 0; i < count; i++) { 890 sp = &sa[i]; 891 892 /* check for invalid values */ 893 if (sp->timeout >= PFTM_MAX || 894 sp->src.state > PF_TCPS_PROXY_DST || 895 sp->dst.state > PF_TCPS_PROXY_DST) { 896 if (V_pf_status.debug >= PF_DEBUG_MISC) { 897 printf("pfsync_input: PFSYNC_ACT_UPD: " 898 "invalid value\n"); 899 } 900 V_pfsyncstats.pfsyncs_badval++; 901 continue; 902 } 903 904 st = pf_find_state_byid(sp->id, sp->creatorid); 905 if (st == NULL) { 906 /* insert the update */ 907 if (pfsync_state_import(sp, flags)) 908 V_pfsyncstats.pfsyncs_badstate++; 909 continue; 910 } 911 912 if (st->state_flags & PFSTATE_ACK) { 913 pfsync_undefer_state(st, 1); 914 } 915 916 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 917 sync = pfsync_upd_tcp(st, &sp->src, &sp->dst); 918 else { 919 sync = 0; 920 921 /* 922 * Non-TCP protocol state machine always go 923 * forwards 924 */ 925 if (st->src.state > sp->src.state) 926 sync++; 927 else 928 pf_state_peer_ntoh(&sp->src, &st->src); 929 if (st->dst.state > sp->dst.state) 930 sync++; 931 else 932 pf_state_peer_ntoh(&sp->dst, &st->dst); 933 } 934 if (sync < 2) { 935 pfsync_alloc_scrub_memory(&sp->dst, &st->dst); 936 pf_state_peer_ntoh(&sp->dst, &st->dst); 937 st->expire = time_uptime; 938 st->timeout = sp->timeout; 939 } 940 st->pfsync_time = time_uptime; 941 942 if (sync) { 943 V_pfsyncstats.pfsyncs_stale++; 944 945 pfsync_update_state(st); 946 PF_STATE_UNLOCK(st); 947 pfsync_push_all(sc); 948 continue; 949 } 950 PF_STATE_UNLOCK(st); 951 } 952 953 return (len); 954 } 955 956 static int 957 pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags) 958 { 959 struct pfsync_softc *sc = V_pfsyncif; 960 struct pfsync_upd_c *ua, *up; 961 struct pf_kstate *st; 962 int len = count * sizeof(*up); 963 int sync; 964 struct mbuf *mp; 965 int offp, i; 966 967 mp = m_pulldown(m, offset, len, &offp); 968 if (mp == NULL) { 969 V_pfsyncstats.pfsyncs_badlen++; 970 return (-1); 971 } 972 ua = (struct pfsync_upd_c *)(mp->m_data + offp); 973 974 for (i = 0; i < count; i++) { 975 up = &ua[i]; 976 977 /* check for invalid values */ 978 if (up->timeout >= PFTM_MAX || 979 up->src.state > PF_TCPS_PROXY_DST || 980 up->dst.state 
> PF_TCPS_PROXY_DST) { 981 if (V_pf_status.debug >= PF_DEBUG_MISC) { 982 printf("pfsync_input: " 983 "PFSYNC_ACT_UPD_C: " 984 "invalid value\n"); 985 } 986 V_pfsyncstats.pfsyncs_badval++; 987 continue; 988 } 989 990 st = pf_find_state_byid(up->id, up->creatorid); 991 if (st == NULL) { 992 /* We don't have this state. Ask for it. */ 993 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); 994 pfsync_request_update(up->creatorid, up->id); 995 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); 996 continue; 997 } 998 999 if (st->state_flags & PFSTATE_ACK) { 1000 pfsync_undefer_state(st, 1); 1001 } 1002 1003 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 1004 sync = pfsync_upd_tcp(st, &up->src, &up->dst); 1005 else { 1006 sync = 0; 1007 1008 /* 1009 * Non-TCP protocol state machine always go 1010 * forwards 1011 */ 1012 if (st->src.state > up->src.state) 1013 sync++; 1014 else 1015 pf_state_peer_ntoh(&up->src, &st->src); 1016 if (st->dst.state > up->dst.state) 1017 sync++; 1018 else 1019 pf_state_peer_ntoh(&up->dst, &st->dst); 1020 } 1021 if (sync < 2) { 1022 pfsync_alloc_scrub_memory(&up->dst, &st->dst); 1023 pf_state_peer_ntoh(&up->dst, &st->dst); 1024 st->expire = time_uptime; 1025 st->timeout = up->timeout; 1026 } 1027 st->pfsync_time = time_uptime; 1028 1029 if (sync) { 1030 V_pfsyncstats.pfsyncs_stale++; 1031 1032 pfsync_update_state(st); 1033 PF_STATE_UNLOCK(st); 1034 pfsync_push_all(sc); 1035 continue; 1036 } 1037 PF_STATE_UNLOCK(st); 1038 } 1039 1040 return (len); 1041 } 1042 1043 static int 1044 pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags) 1045 { 1046 struct pfsync_upd_req *ur, *ura; 1047 struct mbuf *mp; 1048 int len = count * sizeof(*ur); 1049 int i, offp; 1050 1051 struct pf_kstate *st; 1052 1053 mp = m_pulldown(m, offset, len, &offp); 1054 if (mp == NULL) { 1055 V_pfsyncstats.pfsyncs_badlen++; 1056 return (-1); 1057 } 1058 ura = (struct pfsync_upd_req *)(mp->m_data + offp); 1059 1060 for (i = 0; i < count; i++) { 1061 ur = &ura[i]; 1062 1063 if (ur->id == 0 && ur->creatorid == 0) 1064 pfsync_bulk_start(); 1065 else { 1066 st = pf_find_state_byid(ur->id, ur->creatorid); 1067 if (st == NULL) { 1068 V_pfsyncstats.pfsyncs_badstate++; 1069 continue; 1070 } 1071 if (st->state_flags & PFSTATE_NOSYNC) { 1072 PF_STATE_UNLOCK(st); 1073 continue; 1074 } 1075 1076 pfsync_update_state_req(st); 1077 PF_STATE_UNLOCK(st); 1078 } 1079 } 1080 1081 return (len); 1082 } 1083 1084 static int 1085 pfsync_in_del(struct mbuf *m, int offset, int count, int flags) 1086 { 1087 struct mbuf *mp; 1088 struct pfsync_state *sa, *sp; 1089 struct pf_kstate *st; 1090 int len = count * sizeof(*sp); 1091 int offp, i; 1092 1093 mp = m_pulldown(m, offset, len, &offp); 1094 if (mp == NULL) { 1095 V_pfsyncstats.pfsyncs_badlen++; 1096 return (-1); 1097 } 1098 sa = (struct pfsync_state *)(mp->m_data + offp); 1099 1100 for (i = 0; i < count; i++) { 1101 sp = &sa[i]; 1102 1103 st = pf_find_state_byid(sp->id, sp->creatorid); 1104 if (st == NULL) { 1105 V_pfsyncstats.pfsyncs_badstate++; 1106 continue; 1107 } 1108 st->state_flags |= PFSTATE_NOSYNC; 1109 pf_unlink_state(st); 1110 } 1111 1112 return (len); 1113 } 1114 1115 static int 1116 pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags) 1117 { 1118 struct mbuf *mp; 1119 struct pfsync_del_c *sa, *sp; 1120 struct pf_kstate *st; 1121 int len = count * sizeof(*sp); 1122 int offp, i; 1123 1124 mp = m_pulldown(m, offset, len, &offp); 1125 if (mp == NULL) { 1126 V_pfsyncstats.pfsyncs_badlen++; 1127 return (-1); 1128 } 1129 sa = (struct pfsync_del_c *)(mp->m_data + offp); 
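	/*
	 * Each compressed delete carries only a state id and creator id.
	 * Matching states are flagged PFSTATE_NOSYNC before being unlinked,
	 * so their removal is not announced back to the peer that sent it.
	 */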
1130 1131 for (i = 0; i < count; i++) { 1132 sp = &sa[i]; 1133 1134 st = pf_find_state_byid(sp->id, sp->creatorid); 1135 if (st == NULL) { 1136 V_pfsyncstats.pfsyncs_badstate++; 1137 continue; 1138 } 1139 1140 st->state_flags |= PFSTATE_NOSYNC; 1141 pf_unlink_state(st); 1142 } 1143 1144 return (len); 1145 } 1146 1147 static int 1148 pfsync_in_bus(struct mbuf *m, int offset, int count, int flags) 1149 { 1150 struct pfsync_softc *sc = V_pfsyncif; 1151 struct pfsync_bus *bus; 1152 struct mbuf *mp; 1153 int len = count * sizeof(*bus); 1154 int offp; 1155 1156 PFSYNC_BLOCK(sc); 1157 1158 /* If we're not waiting for a bulk update, who cares. */ 1159 if (sc->sc_ureq_sent == 0) { 1160 PFSYNC_BUNLOCK(sc); 1161 return (len); 1162 } 1163 1164 mp = m_pulldown(m, offset, len, &offp); 1165 if (mp == NULL) { 1166 PFSYNC_BUNLOCK(sc); 1167 V_pfsyncstats.pfsyncs_badlen++; 1168 return (-1); 1169 } 1170 bus = (struct pfsync_bus *)(mp->m_data + offp); 1171 1172 switch (bus->status) { 1173 case PFSYNC_BUS_START: 1174 callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + 1175 V_pf_limits[PF_LIMIT_STATES].limit / 1176 ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) / 1177 sizeof(struct pfsync_state)), 1178 pfsync_bulk_fail, sc); 1179 if (V_pf_status.debug >= PF_DEBUG_MISC) 1180 printf("pfsync: received bulk update start\n"); 1181 break; 1182 1183 case PFSYNC_BUS_END: 1184 if (time_uptime - ntohl(bus->endtime) >= 1185 sc->sc_ureq_sent) { 1186 /* that's it, we're happy */ 1187 sc->sc_ureq_sent = 0; 1188 sc->sc_bulk_tries = 0; 1189 callout_stop(&sc->sc_bulkfail_tmo); 1190 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 1191 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 1192 "pfsync bulk done"); 1193 sc->sc_flags |= PFSYNCF_OK; 1194 if (V_pf_status.debug >= PF_DEBUG_MISC) 1195 printf("pfsync: received valid " 1196 "bulk update end\n"); 1197 } else { 1198 if (V_pf_status.debug >= PF_DEBUG_MISC) 1199 printf("pfsync: received invalid " 1200 "bulk update end: bad timestamp\n"); 1201 } 1202 break; 1203 } 1204 PFSYNC_BUNLOCK(sc); 1205 1206 return (len); 1207 } 1208 1209 static int 1210 pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags) 1211 { 1212 int len = count * sizeof(struct pfsync_tdb); 1213 1214 #if defined(IPSEC) 1215 struct pfsync_tdb *tp; 1216 struct mbuf *mp; 1217 int offp; 1218 int i; 1219 int s; 1220 1221 mp = m_pulldown(m, offset, len, &offp); 1222 if (mp == NULL) { 1223 V_pfsyncstats.pfsyncs_badlen++; 1224 return (-1); 1225 } 1226 tp = (struct pfsync_tdb *)(mp->m_data + offp); 1227 1228 for (i = 0; i < count; i++) 1229 pfsync_update_net_tdb(&tp[i]); 1230 #endif 1231 1232 return (len); 1233 } 1234 1235 #if defined(IPSEC) 1236 /* Update an in-kernel tdb. Silently fail if no tdb is found. */ 1237 static void 1238 pfsync_update_net_tdb(struct pfsync_tdb *pt) 1239 { 1240 struct tdb *tdb; 1241 int s; 1242 1243 /* check for invalid values */ 1244 if (ntohl(pt->spi) <= SPI_RESERVED_MAX || 1245 (pt->dst.sa.sa_family != AF_INET && 1246 pt->dst.sa.sa_family != AF_INET6)) 1247 goto bad; 1248 1249 tdb = gettdb(pt->spi, &pt->dst, pt->sproto); 1250 if (tdb) { 1251 pt->rpl = ntohl(pt->rpl); 1252 pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes); 1253 1254 /* Neither replay nor byte counter should ever decrease. 
*/ 1255 if (pt->rpl < tdb->tdb_rpl || 1256 pt->cur_bytes < tdb->tdb_cur_bytes) { 1257 goto bad; 1258 } 1259 1260 tdb->tdb_rpl = pt->rpl; 1261 tdb->tdb_cur_bytes = pt->cur_bytes; 1262 } 1263 return; 1264 1265 bad: 1266 if (V_pf_status.debug >= PF_DEBUG_MISC) 1267 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: " 1268 "invalid value\n"); 1269 V_pfsyncstats.pfsyncs_badstate++; 1270 return; 1271 } 1272 #endif 1273 1274 static int 1275 pfsync_in_eof(struct mbuf *m, int offset, int count, int flags) 1276 { 1277 /* check if we are at the right place in the packet */ 1278 if (offset != m->m_pkthdr.len) 1279 V_pfsyncstats.pfsyncs_badlen++; 1280 1281 /* we're done. free and let the caller return */ 1282 m_freem(m); 1283 return (-1); 1284 } 1285 1286 static int 1287 pfsync_in_error(struct mbuf *m, int offset, int count, int flags) 1288 { 1289 V_pfsyncstats.pfsyncs_badact++; 1290 1291 m_freem(m); 1292 return (-1); 1293 } 1294 1295 static int 1296 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 1297 struct route *rt) 1298 { 1299 m_freem(m); 1300 return (0); 1301 } 1302 1303 /* ARGSUSED */ 1304 static int 1305 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1306 { 1307 struct pfsync_softc *sc = ifp->if_softc; 1308 struct ifreq *ifr = (struct ifreq *)data; 1309 struct pfsyncreq pfsyncr; 1310 int error; 1311 int c; 1312 1313 switch (cmd) { 1314 case SIOCSIFFLAGS: 1315 PFSYNC_LOCK(sc); 1316 if (ifp->if_flags & IFF_UP) { 1317 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1318 PFSYNC_UNLOCK(sc); 1319 pfsync_pointers_init(); 1320 } else { 1321 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1322 PFSYNC_UNLOCK(sc); 1323 pfsync_pointers_uninit(); 1324 } 1325 break; 1326 case SIOCSIFMTU: 1327 if (!sc->sc_sync_if || 1328 ifr->ifr_mtu <= PFSYNC_MINPKT || 1329 ifr->ifr_mtu > sc->sc_sync_if->if_mtu) 1330 return (EINVAL); 1331 if (ifr->ifr_mtu < ifp->if_mtu) { 1332 for (c = 0; c < pfsync_buckets; c++) { 1333 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); 1334 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT) 1335 pfsync_sendout(1, c); 1336 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); 1337 } 1338 } 1339 ifp->if_mtu = ifr->ifr_mtu; 1340 break; 1341 case SIOCGETPFSYNC: 1342 bzero(&pfsyncr, sizeof(pfsyncr)); 1343 PFSYNC_LOCK(sc); 1344 if (sc->sc_sync_if) { 1345 strlcpy(pfsyncr.pfsyncr_syncdev, 1346 sc->sc_sync_if->if_xname, IFNAMSIZ); 1347 } 1348 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer; 1349 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; 1350 pfsyncr.pfsyncr_defer = sc->sc_flags; 1351 PFSYNC_UNLOCK(sc); 1352 return (copyout(&pfsyncr, ifr_data_get_ptr(ifr), 1353 sizeof(pfsyncr))); 1354 1355 case SIOCSETPFSYNC: 1356 { 1357 struct in_mfilter *imf = NULL; 1358 struct ifnet *sifp; 1359 struct ip *ip; 1360 1361 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) 1362 return (error); 1363 if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr, 1364 sizeof(pfsyncr)))) 1365 return (error); 1366 1367 if (pfsyncr.pfsyncr_maxupdates > 255) 1368 return (EINVAL); 1369 1370 if (pfsyncr.pfsyncr_syncdev[0] == 0) 1371 sifp = NULL; 1372 else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL) 1373 return (EINVAL); 1374 1375 if (sifp != NULL && ( 1376 pfsyncr.pfsyncr_syncpeer.s_addr == 0 || 1377 pfsyncr.pfsyncr_syncpeer.s_addr == 1378 htonl(INADDR_PFSYNC_GROUP))) 1379 imf = ip_mfilter_alloc(M_WAITOK, 0, 0); 1380 1381 PFSYNC_LOCK(sc); 1382 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0) 1383 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP); 1384 else 1385 sc->sc_sync_peer.s_addr = 1386 pfsyncr.pfsyncr_syncpeer.s_addr; 1387 1388 
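	/*
	 * The remainder of the SIOCSETPFSYNC path applies the new settings
	 * under PFSYNC_LOCK(): maxupdates and the defer flag are copied into
	 * the softc, queued per-bucket data is flushed where the new sync
	 * interface MTU requires it, multicast membership is rebuilt and,
	 * if a sync interface is configured, a bulk update is requested
	 * from the peer.
	 */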
sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates; 1389 if (pfsyncr.pfsyncr_defer & PFSYNCF_DEFER) { 1390 sc->sc_flags |= PFSYNCF_DEFER; 1391 V_pfsync_defer_ptr = pfsync_defer; 1392 } else { 1393 sc->sc_flags &= ~PFSYNCF_DEFER; 1394 V_pfsync_defer_ptr = NULL; 1395 } 1396 1397 if (sifp == NULL) { 1398 if (sc->sc_sync_if) 1399 if_rele(sc->sc_sync_if); 1400 sc->sc_sync_if = NULL; 1401 pfsync_multicast_cleanup(sc); 1402 PFSYNC_UNLOCK(sc); 1403 break; 1404 } 1405 1406 for (c = 0; c < pfsync_buckets; c++) { 1407 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); 1408 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT && 1409 (sifp->if_mtu < sc->sc_ifp->if_mtu || 1410 (sc->sc_sync_if != NULL && 1411 sifp->if_mtu < sc->sc_sync_if->if_mtu) || 1412 sifp->if_mtu < MCLBYTES - sizeof(struct ip))) 1413 pfsync_sendout(1, c); 1414 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); 1415 } 1416 1417 pfsync_multicast_cleanup(sc); 1418 1419 if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) { 1420 error = pfsync_multicast_setup(sc, sifp, imf); 1421 if (error) { 1422 if_rele(sifp); 1423 ip_mfilter_free(imf); 1424 PFSYNC_UNLOCK(sc); 1425 return (error); 1426 } 1427 } 1428 if (sc->sc_sync_if) 1429 if_rele(sc->sc_sync_if); 1430 sc->sc_sync_if = sifp; 1431 1432 ip = &sc->sc_template; 1433 bzero(ip, sizeof(*ip)); 1434 ip->ip_v = IPVERSION; 1435 ip->ip_hl = sizeof(sc->sc_template) >> 2; 1436 ip->ip_tos = IPTOS_LOWDELAY; 1437 /* len and id are set later. */ 1438 ip->ip_off = htons(IP_DF); 1439 ip->ip_ttl = PFSYNC_DFLTTL; 1440 ip->ip_p = IPPROTO_PFSYNC; 1441 ip->ip_src.s_addr = INADDR_ANY; 1442 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr; 1443 1444 /* Request a full state table update. */ 1445 if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 1446 (*carp_demote_adj_p)(V_pfsync_carp_adj, 1447 "pfsync bulk start"); 1448 sc->sc_flags &= ~PFSYNCF_OK; 1449 if (V_pf_status.debug >= PF_DEBUG_MISC) 1450 printf("pfsync: requesting bulk update\n"); 1451 PFSYNC_UNLOCK(sc); 1452 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); 1453 pfsync_request_update(0, 0); 1454 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); 1455 PFSYNC_BLOCK(sc); 1456 sc->sc_ureq_sent = time_uptime; 1457 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, 1458 sc); 1459 PFSYNC_BUNLOCK(sc); 1460 1461 break; 1462 } 1463 default: 1464 return (ENOTTY); 1465 } 1466 1467 return (0); 1468 } 1469 1470 static void 1471 pfsync_out_state(struct pf_kstate *st, void *buf) 1472 { 1473 struct pfsync_state *sp = buf; 1474 1475 pfsync_state_export(sp, st); 1476 } 1477 1478 static void 1479 pfsync_out_iack(struct pf_kstate *st, void *buf) 1480 { 1481 struct pfsync_ins_ack *iack = buf; 1482 1483 iack->id = st->id; 1484 iack->creatorid = st->creatorid; 1485 } 1486 1487 static void 1488 pfsync_out_upd_c(struct pf_kstate *st, void *buf) 1489 { 1490 struct pfsync_upd_c *up = buf; 1491 1492 bzero(up, sizeof(*up)); 1493 up->id = st->id; 1494 pf_state_peer_hton(&st->src, &up->src); 1495 pf_state_peer_hton(&st->dst, &up->dst); 1496 up->creatorid = st->creatorid; 1497 up->timeout = st->timeout; 1498 } 1499 1500 static void 1501 pfsync_out_del(struct pf_kstate *st, void *buf) 1502 { 1503 struct pfsync_del_c *dp = buf; 1504 1505 dp->id = st->id; 1506 dp->creatorid = st->creatorid; 1507 st->state_flags |= PFSTATE_NOSYNC; 1508 } 1509 1510 static void 1511 pfsync_drop(struct pfsync_softc *sc) 1512 { 1513 struct pf_kstate *st, *next; 1514 struct pfsync_upd_req_item *ur; 1515 struct pfsync_bucket *b; 1516 int c, q; 1517 1518 for (c = 0; c < pfsync_buckets; c++) { 1519 b = &sc->sc_buckets[c]; 1520 for (q = 0; q < 
PFSYNC_S_COUNT; q++) { 1521 if (TAILQ_EMPTY(&b->b_qs[q])) 1522 continue; 1523 1524 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) { 1525 KASSERT(st->sync_state == q, 1526 ("%s: st->sync_state == q", 1527 __func__)); 1528 st->sync_state = PFSYNC_S_NONE; 1529 pf_release_state(st); 1530 } 1531 TAILQ_INIT(&b->b_qs[q]); 1532 } 1533 1534 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { 1535 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); 1536 free(ur, M_PFSYNC); 1537 } 1538 1539 b->b_len = PFSYNC_MINPKT; 1540 b->b_plus = NULL; 1541 } 1542 } 1543 1544 static void 1545 pfsync_sendout(int schedswi, int c) 1546 { 1547 struct pfsync_softc *sc = V_pfsyncif; 1548 struct ifnet *ifp = sc->sc_ifp; 1549 struct mbuf *m; 1550 struct ip *ip; 1551 struct pfsync_header *ph; 1552 struct pfsync_subheader *subh; 1553 struct pf_kstate *st, *st_next; 1554 struct pfsync_upd_req_item *ur; 1555 struct pfsync_bucket *b = &sc->sc_buckets[c]; 1556 int offset; 1557 int q, count = 0; 1558 1559 KASSERT(sc != NULL, ("%s: null sc", __func__)); 1560 KASSERT(b->b_len > PFSYNC_MINPKT, 1561 ("%s: sc_len %zu", __func__, b->b_len)); 1562 PFSYNC_BUCKET_LOCK_ASSERT(b); 1563 1564 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) { 1565 pfsync_drop(sc); 1566 return; 1567 } 1568 1569 m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR); 1570 if (m == NULL) { 1571 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 1572 V_pfsyncstats.pfsyncs_onomem++; 1573 return; 1574 } 1575 m->m_data += max_linkhdr; 1576 m->m_len = m->m_pkthdr.len = b->b_len; 1577 1578 /* build the ip header */ 1579 ip = (struct ip *)m->m_data; 1580 bcopy(&sc->sc_template, ip, sizeof(*ip)); 1581 offset = sizeof(*ip); 1582 1583 ip->ip_len = htons(m->m_pkthdr.len); 1584 ip_fillid(ip); 1585 1586 /* build the pfsync header */ 1587 ph = (struct pfsync_header *)(m->m_data + offset); 1588 bzero(ph, sizeof(*ph)); 1589 offset += sizeof(*ph); 1590 1591 ph->version = PFSYNC_VERSION; 1592 ph->len = htons(b->b_len - sizeof(*ip)); 1593 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); 1594 1595 /* walk the queues */ 1596 for (q = 0; q < PFSYNC_S_COUNT; q++) { 1597 if (TAILQ_EMPTY(&b->b_qs[q])) 1598 continue; 1599 1600 subh = (struct pfsync_subheader *)(m->m_data + offset); 1601 offset += sizeof(*subh); 1602 1603 count = 0; 1604 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) { 1605 KASSERT(st->sync_state == q, 1606 ("%s: st->sync_state == q", 1607 __func__)); 1608 /* 1609 * XXXGL: some of write methods do unlocked reads 1610 * of state data :( 1611 */ 1612 pfsync_qs[q].write(st, m->m_data + offset); 1613 offset += pfsync_qs[q].len; 1614 st->sync_state = PFSYNC_S_NONE; 1615 pf_release_state(st); 1616 count++; 1617 } 1618 TAILQ_INIT(&b->b_qs[q]); 1619 1620 bzero(subh, sizeof(*subh)); 1621 subh->action = pfsync_qs[q].action; 1622 subh->count = htons(count); 1623 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count; 1624 } 1625 1626 if (!TAILQ_EMPTY(&b->b_upd_req_list)) { 1627 subh = (struct pfsync_subheader *)(m->m_data + offset); 1628 offset += sizeof(*subh); 1629 1630 count = 0; 1631 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { 1632 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); 1633 1634 bcopy(&ur->ur_msg, m->m_data + offset, 1635 sizeof(ur->ur_msg)); 1636 offset += sizeof(ur->ur_msg); 1637 free(ur, M_PFSYNC); 1638 count++; 1639 } 1640 1641 bzero(subh, sizeof(*subh)); 1642 subh->action = PFSYNC_ACT_UPD_REQ; 1643 subh->count = htons(count); 1644 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count; 1645 } 1646 1647 
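	/*
	 * The datagram now carries the IP and pfsync headers, one subheader
	 * per non-empty queue and any pending update requests.  What remains
	 * is the optional caller-supplied ("plus") region, the EOF subheader,
	 * a BPF tap and the hand-off to the bucket send queue.
	 */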
/* has someone built a custom region for us to add? */ 1648 if (b->b_plus != NULL) { 1649 bcopy(b->b_plus, m->m_data + offset, b->b_pluslen); 1650 offset += b->b_pluslen; 1651 1652 b->b_plus = NULL; 1653 } 1654 1655 subh = (struct pfsync_subheader *)(m->m_data + offset); 1656 offset += sizeof(*subh); 1657 1658 bzero(subh, sizeof(*subh)); 1659 subh->action = PFSYNC_ACT_EOF; 1660 subh->count = htons(1); 1661 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++; 1662 1663 /* we're done, let's put it on the wire */ 1664 if (ifp->if_bpf) { 1665 m->m_data += sizeof(*ip); 1666 m->m_len = m->m_pkthdr.len = b->b_len - sizeof(*ip); 1667 BPF_MTAP(ifp, m); 1668 m->m_data -= sizeof(*ip); 1669 m->m_len = m->m_pkthdr.len = b->b_len; 1670 } 1671 1672 if (sc->sc_sync_if == NULL) { 1673 b->b_len = PFSYNC_MINPKT; 1674 m_freem(m); 1675 return; 1676 } 1677 1678 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1); 1679 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); 1680 b->b_len = PFSYNC_MINPKT; 1681 1682 if (!_IF_QFULL(&b->b_snd)) 1683 _IF_ENQUEUE(&b->b_snd, m); 1684 else { 1685 m_freem(m); 1686 if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); 1687 } 1688 if (schedswi) 1689 swi_sched(V_pfsync_swi_cookie, 0); 1690 } 1691 1692 static void 1693 pfsync_insert_state(struct pf_kstate *st) 1694 { 1695 struct pfsync_softc *sc = V_pfsyncif; 1696 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1697 1698 if (st->state_flags & PFSTATE_NOSYNC) 1699 return; 1700 1701 if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || 1702 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) { 1703 st->state_flags |= PFSTATE_NOSYNC; 1704 return; 1705 } 1706 1707 KASSERT(st->sync_state == PFSYNC_S_NONE, 1708 ("%s: st->sync_state %u", __func__, st->sync_state)); 1709 1710 PFSYNC_BUCKET_LOCK(b); 1711 if (b->b_len == PFSYNC_MINPKT) 1712 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 1713 1714 pfsync_q_ins(st, PFSYNC_S_INS, true); 1715 PFSYNC_BUCKET_UNLOCK(b); 1716 1717 st->sync_updates = 0; 1718 } 1719 1720 static int 1721 pfsync_defer(struct pf_kstate *st, struct mbuf *m) 1722 { 1723 struct pfsync_softc *sc = V_pfsyncif; 1724 struct pfsync_deferral *pd; 1725 struct pfsync_bucket *b; 1726 1727 if (m->m_flags & (M_BCAST|M_MCAST)) 1728 return (0); 1729 1730 if (sc == NULL) 1731 return (0); 1732 1733 b = pfsync_get_bucket(sc, st); 1734 1735 PFSYNC_LOCK(sc); 1736 1737 if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) || 1738 !(sc->sc_flags & PFSYNCF_DEFER)) { 1739 PFSYNC_UNLOCK(sc); 1740 return (0); 1741 } 1742 1743 PFSYNC_BUCKET_LOCK(b); 1744 PFSYNC_UNLOCK(sc); 1745 1746 if (b->b_deferred >= 128) 1747 pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0); 1748 1749 pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT); 1750 if (pd == NULL) { 1751 PFSYNC_BUCKET_UNLOCK(b); 1752 return (0); 1753 } 1754 b->b_deferred++; 1755 1756 m->m_flags |= M_SKIP_FIREWALL; 1757 st->state_flags |= PFSTATE_ACK; 1758 1759 pd->pd_sc = sc; 1760 pd->pd_refs = 0; 1761 pd->pd_st = st; 1762 pf_ref_state(st); 1763 pd->pd_m = m; 1764 1765 TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry); 1766 callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED); 1767 callout_reset(&pd->pd_tmo, PFSYNC_DEFER_TIMEOUT, pfsync_defer_tmo, pd); 1768 1769 pfsync_push(b); 1770 PFSYNC_BUCKET_UNLOCK(b); 1771 1772 return (1); 1773 } 1774 1775 static void 1776 pfsync_undefer(struct pfsync_deferral *pd, int drop) 1777 { 1778 struct pfsync_softc *sc = pd->pd_sc; 1779 struct mbuf *m = pd->pd_m; 1780 struct pf_kstate *st = pd->pd_st; 1781 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1782 1783 
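	/*
	 * Called with the bucket lock held: the deferral is unlinked, the
	 * state's PFSTATE_ACK flag is cleared and the held packet is either
	 * freed (drop) or placed on the bucket send queue for transmission.
	 */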
PFSYNC_BUCKET_LOCK_ASSERT(b); 1784 1785 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); 1786 b->b_deferred--; 1787 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 1788 free(pd, M_PFSYNC); 1789 pf_release_state(st); 1790 1791 if (drop) 1792 m_freem(m); 1793 else { 1794 _IF_ENQUEUE(&b->b_snd, m); 1795 pfsync_push(b); 1796 } 1797 } 1798 1799 static void 1800 pfsync_defer_tmo(void *arg) 1801 { 1802 struct epoch_tracker et; 1803 struct pfsync_deferral *pd = arg; 1804 struct pfsync_softc *sc = pd->pd_sc; 1805 struct mbuf *m = pd->pd_m; 1806 struct pf_kstate *st = pd->pd_st; 1807 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1808 1809 PFSYNC_BUCKET_LOCK_ASSERT(b); 1810 1811 NET_EPOCH_ENTER(et); 1812 CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); 1813 1814 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); 1815 b->b_deferred--; 1816 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 1817 if (pd->pd_refs == 0) 1818 free(pd, M_PFSYNC); 1819 PFSYNC_BUCKET_UNLOCK(b); 1820 1821 ip_output(m, NULL, NULL, 0, NULL, NULL); 1822 1823 pf_release_state(st); 1824 1825 CURVNET_RESTORE(); 1826 NET_EPOCH_EXIT(et); 1827 } 1828 1829 static void 1830 pfsync_undefer_state(struct pf_kstate *st, int drop) 1831 { 1832 struct pfsync_softc *sc = V_pfsyncif; 1833 struct pfsync_deferral *pd; 1834 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1835 1836 PFSYNC_BUCKET_LOCK(b); 1837 1838 TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) { 1839 if (pd->pd_st == st) { 1840 if (callout_stop(&pd->pd_tmo) > 0) 1841 pfsync_undefer(pd, drop); 1842 1843 PFSYNC_BUCKET_UNLOCK(b); 1844 return; 1845 } 1846 } 1847 PFSYNC_BUCKET_UNLOCK(b); 1848 1849 panic("%s: unable to find deferred state", __func__); 1850 } 1851 1852 static struct pfsync_bucket* 1853 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st) 1854 { 1855 int c = PF_IDHASH(st) % pfsync_buckets; 1856 return &sc->sc_buckets[c]; 1857 } 1858 1859 static void 1860 pfsync_update_state(struct pf_kstate *st) 1861 { 1862 struct pfsync_softc *sc = V_pfsyncif; 1863 bool sync = false, ref = true; 1864 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1865 1866 PF_STATE_LOCK_ASSERT(st); 1867 PFSYNC_BUCKET_LOCK(b); 1868 1869 if (st->state_flags & PFSTATE_ACK) 1870 pfsync_undefer_state(st, 0); 1871 if (st->state_flags & PFSTATE_NOSYNC) { 1872 if (st->sync_state != PFSYNC_S_NONE) 1873 pfsync_q_del(st, true, b); 1874 PFSYNC_BUCKET_UNLOCK(b); 1875 return; 1876 } 1877 1878 if (b->b_len == PFSYNC_MINPKT) 1879 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 1880 1881 switch (st->sync_state) { 1882 case PFSYNC_S_UPD_C: 1883 case PFSYNC_S_UPD: 1884 case PFSYNC_S_INS: 1885 /* we're already handling it */ 1886 1887 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) { 1888 st->sync_updates++; 1889 if (st->sync_updates >= sc->sc_maxupdates) 1890 sync = true; 1891 } 1892 break; 1893 1894 case PFSYNC_S_IACK: 1895 pfsync_q_del(st, false, b); 1896 ref = false; 1897 /* FALLTHROUGH */ 1898 1899 case PFSYNC_S_NONE: 1900 pfsync_q_ins(st, PFSYNC_S_UPD_C, ref); 1901 st->sync_updates = 0; 1902 break; 1903 1904 default: 1905 panic("%s: unexpected sync state %d", __func__, st->sync_state); 1906 } 1907 1908 if (sync || (time_uptime - st->pfsync_time) < 2) 1909 pfsync_push(b); 1910 1911 PFSYNC_BUCKET_UNLOCK(b); 1912 } 1913 1914 static void 1915 pfsync_request_update(u_int32_t creatorid, u_int64_t id) 1916 { 1917 struct pfsync_softc *sc = V_pfsyncif; 1918 struct pfsync_bucket *b = &sc->sc_buckets[0]; 1919 struct pfsync_upd_req_item *item; 1920 size_t nlen = sizeof(struct pfsync_upd_req); 1921 
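	/*
	 * Update requests carry only (creatorid, id) and are always staged
	 * on bucket 0, since there is no local state to pick a bucket from.
	 */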
1922 PFSYNC_BUCKET_LOCK_ASSERT(b); 1923 1924 /* 1925 * This code does a bit to prevent multiple update requests for the 1926 * same state being generated. It searches current subheader queue, 1927 * but it doesn't lookup into queue of already packed datagrams. 1928 */ 1929 TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry) 1930 if (item->ur_msg.id == id && 1931 item->ur_msg.creatorid == creatorid) 1932 return; 1933 1934 item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT); 1935 if (item == NULL) 1936 return; /* XXX stats */ 1937 1938 item->ur_msg.id = id; 1939 item->ur_msg.creatorid = creatorid; 1940 1941 if (TAILQ_EMPTY(&b->b_upd_req_list)) 1942 nlen += sizeof(struct pfsync_subheader); 1943 1944 if (b->b_len + nlen > sc->sc_ifp->if_mtu) { 1945 pfsync_sendout(0, 0); 1946 1947 nlen = sizeof(struct pfsync_subheader) + 1948 sizeof(struct pfsync_upd_req); 1949 } 1950 1951 TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry); 1952 b->b_len += nlen; 1953 1954 pfsync_push(b); 1955 } 1956 1957 static bool 1958 pfsync_update_state_req(struct pf_kstate *st) 1959 { 1960 struct pfsync_softc *sc = V_pfsyncif; 1961 bool ref = true, full = false; 1962 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1963 1964 PF_STATE_LOCK_ASSERT(st); 1965 PFSYNC_BUCKET_LOCK(b); 1966 1967 if (st->state_flags & PFSTATE_NOSYNC) { 1968 if (st->sync_state != PFSYNC_S_NONE) 1969 pfsync_q_del(st, true, b); 1970 PFSYNC_BUCKET_UNLOCK(b); 1971 return (full); 1972 } 1973 1974 switch (st->sync_state) { 1975 case PFSYNC_S_UPD_C: 1976 case PFSYNC_S_IACK: 1977 pfsync_q_del(st, false, b); 1978 ref = false; 1979 /* FALLTHROUGH */ 1980 1981 case PFSYNC_S_NONE: 1982 pfsync_q_ins(st, PFSYNC_S_UPD, ref); 1983 pfsync_push(b); 1984 break; 1985 1986 case PFSYNC_S_INS: 1987 case PFSYNC_S_UPD: 1988 case PFSYNC_S_DEL: 1989 /* we're already handling it */ 1990 break; 1991 1992 default: 1993 panic("%s: unexpected sync state %d", __func__, st->sync_state); 1994 } 1995 1996 if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(struct pfsync_state)) 1997 full = true; 1998 1999 PFSYNC_BUCKET_UNLOCK(b); 2000 2001 return (full); 2002 } 2003 2004 static void 2005 pfsync_delete_state(struct pf_kstate *st) 2006 { 2007 struct pfsync_softc *sc = V_pfsyncif; 2008 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2009 bool ref = true; 2010 2011 PFSYNC_BUCKET_LOCK(b); 2012 if (st->state_flags & PFSTATE_ACK) 2013 pfsync_undefer_state(st, 1); 2014 if (st->state_flags & PFSTATE_NOSYNC) { 2015 if (st->sync_state != PFSYNC_S_NONE) 2016 pfsync_q_del(st, true, b); 2017 PFSYNC_BUCKET_UNLOCK(b); 2018 return; 2019 } 2020 2021 if (b->b_len == PFSYNC_MINPKT) 2022 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 2023 2024 switch (st->sync_state) { 2025 case PFSYNC_S_INS: 2026 /* We never got to tell the world so just forget about it. 
*/ 2027 pfsync_q_del(st, true, b); 2028 break; 2029 2030 case PFSYNC_S_UPD_C: 2031 case PFSYNC_S_UPD: 2032 case PFSYNC_S_IACK: 2033 pfsync_q_del(st, false, b); 2034 ref = false; 2035 /* FALLTHROUGH */ 2036 2037 case PFSYNC_S_NONE: 2038 pfsync_q_ins(st, PFSYNC_S_DEL, ref); 2039 break; 2040 2041 default: 2042 panic("%s: unexpected sync state %d", __func__, st->sync_state); 2043 } 2044 2045 PFSYNC_BUCKET_UNLOCK(b); 2046 } 2047 2048 static void 2049 pfsync_clear_states(u_int32_t creatorid, const char *ifname) 2050 { 2051 struct { 2052 struct pfsync_subheader subh; 2053 struct pfsync_clr clr; 2054 } __packed r; 2055 2056 bzero(&r, sizeof(r)); 2057 2058 r.subh.action = PFSYNC_ACT_CLR; 2059 r.subh.count = htons(1); 2060 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++; 2061 2062 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); 2063 r.clr.creatorid = creatorid; 2064 2065 pfsync_send_plus(&r, sizeof(r)); 2066 } 2067 2068 static void 2069 pfsync_q_ins(struct pf_kstate *st, int q, bool ref) 2070 { 2071 struct pfsync_softc *sc = V_pfsyncif; 2072 size_t nlen = pfsync_qs[q].len; 2073 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2074 2075 PFSYNC_BUCKET_LOCK_ASSERT(b); 2076 2077 KASSERT(st->sync_state == PFSYNC_S_NONE, 2078 ("%s: st->sync_state %u", __func__, st->sync_state)); 2079 KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu", 2080 b->b_len)); 2081 2082 if (TAILQ_EMPTY(&b->b_qs[q])) 2083 nlen += sizeof(struct pfsync_subheader); 2084 2085 if (b->b_len + nlen > sc->sc_ifp->if_mtu) { 2086 pfsync_sendout(1, b->b_id); 2087 2088 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; 2089 } 2090 2091 b->b_len += nlen; 2092 TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list); 2093 st->sync_state = q; 2094 if (ref) 2095 pf_ref_state(st); 2096 } 2097 2098 static void 2099 pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b) 2100 { 2101 int q = st->sync_state; 2102 2103 PFSYNC_BUCKET_LOCK_ASSERT(b); 2104 KASSERT(st->sync_state != PFSYNC_S_NONE, 2105 ("%s: st->sync_state != PFSYNC_S_NONE", __func__)); 2106 2107 b->b_len -= pfsync_qs[q].len; 2108 TAILQ_REMOVE(&b->b_qs[q], st, sync_list); 2109 st->sync_state = PFSYNC_S_NONE; 2110 if (unref) 2111 pf_release_state(st); 2112 2113 if (TAILQ_EMPTY(&b->b_qs[q])) 2114 b->b_len -= sizeof(struct pfsync_subheader); 2115 } 2116 2117 static void 2118 pfsync_bulk_start(void) 2119 { 2120 struct pfsync_softc *sc = V_pfsyncif; 2121 2122 if (V_pf_status.debug >= PF_DEBUG_MISC) 2123 printf("pfsync: received bulk update request\n"); 2124 2125 PFSYNC_BLOCK(sc); 2126 2127 sc->sc_ureq_received = time_uptime; 2128 sc->sc_bulk_hashid = 0; 2129 sc->sc_bulk_stateid = 0; 2130 pfsync_bulk_status(PFSYNC_BUS_START); 2131 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); 2132 PFSYNC_BUNLOCK(sc); 2133 } 2134 2135 static void 2136 pfsync_bulk_update(void *arg) 2137 { 2138 struct pfsync_softc *sc = arg; 2139 struct pf_kstate *s; 2140 int i; 2141 2142 PFSYNC_BLOCK_ASSERT(sc); 2143 CURVNET_SET(sc->sc_ifp->if_vnet); 2144 2145 /* 2146 * Start with last state from previous invocation. 2147 * It may had gone, in this case start from the 2148 * hash slot. 
static void
pfsync_bulk_start(void)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: received bulk update request\n");

	PFSYNC_BLOCK(sc);

	sc->sc_ureq_received = time_uptime;
	sc->sc_bulk_hashid = 0;
	sc->sc_bulk_stateid = 0;
	pfsync_bulk_status(PFSYNC_BUS_START);
	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	PFSYNC_BUNLOCK(sc);
}

static void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_kstate *s;
	int i;

	PFSYNC_BLOCK_ASSERT(sc);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	/*
	 * Start with the last state from the previous invocation.
	 * It may have gone away; in that case, start from the saved
	 * hash slot.
	 */
	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);

	if (s != NULL)
		i = PF_IDHASH(s);
	else
		i = sc->sc_bulk_hashid;

	for (; i <= pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

		if (s != NULL)
			PF_HASHROW_ASSERT(ih);
		else {
			PF_HASHROW_LOCK(ih);
			s = LIST_FIRST(&ih->states);
		}

		for (; s; s = LIST_NEXT(s, entry)) {
			if (s->sync_state == PFSYNC_S_NONE &&
			    s->timeout < PFTM_MAX &&
			    s->pfsync_time <= sc->sc_ureq_received) {
				if (pfsync_update_state_req(s)) {
					/* We've filled a packet. */
					sc->sc_bulk_hashid = i;
					sc->sc_bulk_stateid = s->id;
					sc->sc_bulk_creatorid = s->creatorid;
					PF_HASHROW_UNLOCK(ih);
					callout_reset(&sc->sc_bulk_tmo, 1,
					    pfsync_bulk_update, sc);
					goto full;
				}
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	/* We're done. */
	pfsync_bulk_status(PFSYNC_BUS_END);
full:
	CURVNET_RESTORE();
}

static void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

	struct pfsync_softc *sc = V_pfsyncif;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;

	r.bus.creatorid = V_pf_status.hostid;
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));
}

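/*
 * pfsync_bulk_fail() is the timeout side of a bulk update that we
 * requested from a peer.  While retries remain (PFSYNC_MAX_BULKTRIES)
 * it re-arms itself every 5 seconds and re-sends a wildcard update
 * request, pfsync_request_update(0, 0), i.e. id and creatorid both
 * zero.  Once the retries are exhausted it gives up and behaves as if
 * the transfer had succeeded: it sets PFSYNCF_OK and adjusts the carp
 * demotion counter by -V_pfsync_carp_adj, presumably undoing the
 * demotion applied when the bulk update was first requested.
 */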
static void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pfsync_bucket *b = &sc->sc_buckets[0];

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_BLOCK_ASSERT(sc);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
		PFSYNC_BUCKET_LOCK(b);
		pfsync_request_update(0, 0);
		PFSYNC_BUCKET_UNLOCK(b);
	} else {
		/* Pretend the transfer was OK. */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		PFSYNC_LOCK(sc);
		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->sc_flags |= PFSYNCF_OK;
		PFSYNC_UNLOCK(sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
	}

	CURVNET_RESTORE();
}

static void
pfsync_send_plus(void *plus, size_t pluslen)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = &sc->sc_buckets[0];

	PFSYNC_BUCKET_LOCK(b);

	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
		pfsync_sendout(1, b->b_id);

	b->b_plus = plus;
	b->b_len += (b->b_pluslen = pluslen);

	pfsync_sendout(1, b->b_id);
	PFSYNC_BUCKET_UNLOCK(b);
}

static void
pfsync_timeout(void *arg)
{
	struct pfsync_bucket *b = arg;

	CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
	PFSYNC_BUCKET_LOCK(b);
	pfsync_push(b);
	PFSYNC_BUCKET_UNLOCK(b);
	CURVNET_RESTORE();
}

static void
pfsync_push(struct pfsync_bucket *b)
{

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	b->b_flags |= PFSYNCF_BUCKET_PUSH;
	swi_sched(V_pfsync_swi_cookie, 0);
}

static void
pfsync_push_all(struct pfsync_softc *sc)
{
	int c;
	struct pfsync_bucket *b;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];

		PFSYNC_BUCKET_LOCK(b);
		pfsync_push(b);
		PFSYNC_BUCKET_UNLOCK(b);
	}
}

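/*
 * pfsyncintr() is the software interrupt handler registered by
 * vnet_pfsync_init() below.  pfsync_push() marks a bucket with
 * PFSYNCF_BUCKET_PUSH and swi_sched()s us; we then flush every bucket
 * that asked for it and drain its send queue.  Two kinds of mbufs sit
 * on that queue: deferral packets (marked M_SKIP_FIREWALL), which are
 * simply re-injected with ip_output(), and pfsync's own packets, which
 * are sent with the multicast options in sc_imo and counted in the
 * pfsync statistics.
 */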
2335 */ 2336 if (m->m_flags & M_SKIP_FIREWALL) 2337 ip_output(m, NULL, NULL, 0, NULL, NULL); 2338 else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, 2339 NULL) == 0) 2340 V_pfsyncstats.pfsyncs_opackets++; 2341 else 2342 V_pfsyncstats.pfsyncs_oerrors++; 2343 } 2344 } 2345 CURVNET_RESTORE(); 2346 NET_EPOCH_EXIT(et); 2347 } 2348 2349 static int 2350 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, 2351 struct in_mfilter *imf) 2352 { 2353 struct ip_moptions *imo = &sc->sc_imo; 2354 int error; 2355 2356 if (!(ifp->if_flags & IFF_MULTICAST)) 2357 return (EADDRNOTAVAIL); 2358 2359 imo->imo_multicast_vif = -1; 2360 2361 if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL, 2362 &imf->imf_inm)) != 0) 2363 return (error); 2364 2365 ip_mfilter_init(&imo->imo_head); 2366 ip_mfilter_insert(&imo->imo_head, imf); 2367 imo->imo_multicast_ifp = ifp; 2368 imo->imo_multicast_ttl = PFSYNC_DFLTTL; 2369 imo->imo_multicast_loop = 0; 2370 2371 return (0); 2372 } 2373 2374 static void 2375 pfsync_multicast_cleanup(struct pfsync_softc *sc) 2376 { 2377 struct ip_moptions *imo = &sc->sc_imo; 2378 struct in_mfilter *imf; 2379 2380 while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) { 2381 ip_mfilter_remove(&imo->imo_head, imf); 2382 in_leavegroup(imf->imf_inm, NULL); 2383 ip_mfilter_free(imf); 2384 } 2385 imo->imo_multicast_ifp = NULL; 2386 } 2387 2388 void 2389 pfsync_detach_ifnet(struct ifnet *ifp) 2390 { 2391 struct pfsync_softc *sc = V_pfsyncif; 2392 2393 if (sc == NULL) 2394 return; 2395 2396 PFSYNC_LOCK(sc); 2397 2398 if (sc->sc_sync_if == ifp) { 2399 /* We don't need mutlicast cleanup here, because the interface 2400 * is going away. We do need to ensure we don't try to do 2401 * cleanup later. 2402 */ 2403 ip_mfilter_init(&sc->sc_imo.imo_head); 2404 sc->sc_imo.imo_multicast_ifp = NULL; 2405 sc->sc_sync_if = NULL; 2406 } 2407 2408 PFSYNC_UNLOCK(sc); 2409 } 2410 2411 static void 2412 pfsync_pointers_init(void) 2413 { 2414 2415 PF_RULES_WLOCK(); 2416 V_pfsync_state_import_ptr = pfsync_state_import; 2417 V_pfsync_insert_state_ptr = pfsync_insert_state; 2418 V_pfsync_update_state_ptr = pfsync_update_state; 2419 V_pfsync_delete_state_ptr = pfsync_delete_state; 2420 V_pfsync_clear_states_ptr = pfsync_clear_states; 2421 V_pfsync_defer_ptr = pfsync_defer; 2422 PF_RULES_WUNLOCK(); 2423 } 2424 2425 static void 2426 pfsync_pointers_uninit(void) 2427 { 2428 2429 PF_RULES_WLOCK(); 2430 V_pfsync_state_import_ptr = NULL; 2431 V_pfsync_insert_state_ptr = NULL; 2432 V_pfsync_update_state_ptr = NULL; 2433 V_pfsync_delete_state_ptr = NULL; 2434 V_pfsync_clear_states_ptr = NULL; 2435 V_pfsync_defer_ptr = NULL; 2436 PF_RULES_WUNLOCK(); 2437 } 2438 2439 static void 2440 vnet_pfsync_init(const void *unused __unused) 2441 { 2442 int error; 2443 2444 V_pfsync_cloner = if_clone_simple(pfsyncname, 2445 pfsync_clone_create, pfsync_clone_destroy, 1); 2446 error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif, 2447 SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie); 2448 if (error) { 2449 if_clone_detach(V_pfsync_cloner); 2450 log(LOG_INFO, "swi_add() failed in %s\n", __func__); 2451 } 2452 2453 pfsync_pointers_init(); 2454 } 2455 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY, 2456 vnet_pfsync_init, NULL); 2457 2458 static void 2459 vnet_pfsync_uninit(const void *unused __unused) 2460 { 2461 int ret __diagused; 2462 2463 pfsync_pointers_uninit(); 2464 2465 if_clone_detach(V_pfsync_cloner); 2466 ret = swi_remove(V_pfsync_swi_cookie); 2467 MPASS(ret == 0); 2468 ret = 
static void
vnet_pfsync_init(const void *unused __unused)
{
	int error;

	V_pfsync_cloner = if_clone_simple(pfsyncname,
	    pfsync_clone_create, pfsync_clone_destroy, 1);
	error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
	    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
	if (error) {
		if_clone_detach(V_pfsync_cloner);
		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
	}

	pfsync_pointers_init();
}
VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    vnet_pfsync_init, NULL);

static void
vnet_pfsync_uninit(const void *unused __unused)
{
	int ret __diagused;

	pfsync_pointers_uninit();

	if_clone_detach(V_pfsync_cloner);
	ret = swi_remove(V_pfsync_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pfsync_swi_ie);
	MPASS(ret == 0);
}

VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
    vnet_pfsync_uninit, NULL);

static int
pfsync_init(void)
{
#ifdef INET
	int error;

	pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;

	error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
	if (error)
		return (error);
#endif

	return (0);
}

static void
pfsync_uninit(void)
{
	pfsync_detach_ifnet_ptr = NULL;

#ifdef INET
	ipproto_unregister(IPPROTO_PFSYNC);
#endif
}

static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_UNLOAD:
		pfsync_uninit();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pfsync_mod = {
	pfsyncname,
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

/* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);