/*-
 * SPDX-License-Identifier: (BSD-2-Clause AND ISC)
 *
 * Copyright (c) 2002 Michael Shalayeff
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
 *
 * Revisions picked from OpenBSD after revision 1.110 import:
 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
 * 1.120, 1.175 - use monotonic time_uptime
 * 1.122 - reduce number of updates for non-TCP sessions
 * 1.125, 1.127 - rewrite merge or stale processing
 * 1.128 - cleanups
 * 1.146 - bzero() mbuf before sparsely filling it with data
 * 1.170 - SIOCSIFMTU checks
 * 1.126, 1.142 - deferred packets processing
 * 1.173 - correct expire time processing
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/nv.h>
#include <sys/priv.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/route.h>
#include <net/if_pfsync.h>

#include <netinet/if_ether.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>

#include <netpfil/pf/pfsync_nv.h>

struct pfsync_bucket;
struct pfsync_softc;

union inet_template {
	struct ip	ipv4;
	struct ip6_hdr	ipv6;
};

#define PFSYNC_MINPKT ( \
	sizeof(union inet_template) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) )

static int	pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
		    struct pfsync_state_peer *);
static int	pfsync_in_clr(struct mbuf *, int, int, int, int);
static int	pfsync_in_ins(struct mbuf *, int, int, int, int);
static int	pfsync_in_iack(struct mbuf *, int, int, int, int);
static int	pfsync_in_upd(struct mbuf *, int, int, int, int);
static int	pfsync_in_upd_c(struct mbuf *, int, int, int, int);
static int	pfsync_in_ureq(struct mbuf *, int, int, int, int);
static int	pfsync_in_del_c(struct mbuf *, int, int, int, int);
static int	pfsync_in_bus(struct mbuf *, int, int, int, int);
static int	pfsync_in_tdb(struct mbuf *, int, int, int, int);
static int	pfsync_in_eof(struct mbuf *, int, int, int, int);
static int	pfsync_in_error(struct mbuf *, int, int, int, int);
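
/*
 * Incoming datagrams are parsed as a sequence of subheaders, each
 * carrying an action code and a message count.  The table below maps
 * an action to its input handler.  A handler returns the number of
 * bytes it consumed, or -1 if it took ownership of (and freed) the
 * mbuf, which aborts further parsing of the packet.
 */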
static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS_1301 */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1301 */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_error,		/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof,			/* PFSYNC_ACT_EOF */
	pfsync_in_ins,			/* PFSYNC_ACT_INS_1400 */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1400 */
};

struct pfsync_q {
	void		(*write)(struct pf_kstate *, void *);
	size_t		len;
	u_int8_t	action;
};

/* We have the following sync queues */
enum pfsync_q_id {
	PFSYNC_Q_INS_1301,
	PFSYNC_Q_INS_1400,
	PFSYNC_Q_IACK,
	PFSYNC_Q_UPD_1301,
	PFSYNC_Q_UPD_1400,
	PFSYNC_Q_UPD_C,
	PFSYNC_Q_DEL_C,
	PFSYNC_Q_COUNT,
};

/* Functions for building messages for given queue */
static void	pfsync_out_state_1301(struct pf_kstate *, void *);
static void	pfsync_out_state_1400(struct pf_kstate *, void *);
static void	pfsync_out_iack(struct pf_kstate *, void *);
static void	pfsync_out_upd_c(struct pf_kstate *, void *);
static void	pfsync_out_del_c(struct pf_kstate *, void *);

/* Attach those functions to queue */
static struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 },
	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 },
	{ pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 },
	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
	{ pfsync_out_del_c, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
};

/* Map queue to pf_kstate->sync_state */
static u_int8_t pfsync_qid_sstate[] = {
	PFSYNC_S_INS,	/* PFSYNC_Q_INS_1301 */
	PFSYNC_S_INS,	/* PFSYNC_Q_INS_1400 */
	PFSYNC_S_IACK,	/* PFSYNC_Q_IACK */
	PFSYNC_S_UPD,	/* PFSYNC_Q_UPD_1301 */
	PFSYNC_S_UPD,	/* PFSYNC_Q_UPD_1400 */
	PFSYNC_S_UPD_C,	/* PFSYNC_Q_UPD_C */
	PFSYNC_S_DEL_C,	/* PFSYNC_Q_DEL_C */
};

/* Map pf_kstate->sync_state to queue */
static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t);

static void	pfsync_q_ins(struct pf_kstate *, int sync_state, bool);
static void	pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);

static void	pfsync_update_state(struct pf_kstate *);
static void	pfsync_tx(struct pfsync_softc *, struct mbuf *);

struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};

struct pfsync_deferral {
	struct pfsync_softc		*pd_sc;
	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
	struct callout			pd_tmo;

	struct pf_kstate		*pd_st;
	struct mbuf			*pd_m;
};

struct pfsync_bucket
{
	int			b_id;
	struct pfsync_softc	*b_sc;
	struct mtx		b_mtx;
	struct callout		b_tmo;
	int			b_flags;
#define	PFSYNCF_BUCKET_PUSH	0x00000001

	size_t			b_len;
	TAILQ_HEAD(, pf_kstate)			b_qs[PFSYNC_Q_COUNT];
	TAILQ_HEAD(, pfsync_upd_req_item)	b_upd_req_list;
	TAILQ_HEAD(, pfsync_deferral)		b_deferrals;
	u_int			b_deferred;
	void			*b_plus;
	size_t			b_pluslen;

	struct ifaltq		b_snd;
};
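
/*
 * There is a single pfsync interface per vnet.  Its configuration lives
 * in the softc below, protected by sc_mtx; outgoing messages are staged
 * in the per-bucket structures above, each under its own b_mtx, so that
 * state updates hashed to different buckets do not contend with each
 * other.  Bulk update bookkeeping is under a separate lock, sc_bulk_mtx.
 */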
struct pfsync_softc {
	/* Configuration */
	struct ifnet		*sc_ifp;
	struct ifnet		*sc_sync_if;
	struct ip_moptions	sc_imo;
	struct ip6_moptions	sc_im6o;
	struct sockaddr_storage	sc_sync_peer;
	uint32_t		sc_flags;
	uint8_t			sc_maxupdates;
	union inet_template	sc_template;
	struct mtx		sc_mtx;
	uint32_t		sc_version;

	/* Queued data */
	struct pfsync_bucket	*sc_buckets;

	/* Bulk update info */
	struct mtx		sc_bulk_mtx;
	uint32_t		sc_ureq_sent;
	int			sc_bulk_tries;
	uint32_t		sc_ureq_received;
	int			sc_bulk_hashid;
	uint64_t		sc_bulk_stateid;
	uint32_t		sc_bulk_creatorid;
	struct callout		sc_bulk_tmo;
	struct callout		sc_bulkfail_tmo;
};

#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

#define	PFSYNC_BUCKET_LOCK(b)		mtx_lock(&(b)->b_mtx)
#define	PFSYNC_BUCKET_UNLOCK(b)		mtx_unlock(&(b)->b_mtx)
#define	PFSYNC_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->b_mtx, MA_OWNED)

#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)

#define	PFSYNC_DEFER_TIMEOUT	20

static const char pfsyncname[] = "pfsync";
static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)
VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
#define	V_pfsync_swi_ie		VNET(pfsync_swi_ie)
VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT;
#define	V_pfsync_defer_timeout	VNET(pfsync_defer_timeout)

static void	pfsync_timeout(void *);
static void	pfsync_push(struct pfsync_bucket *);
static void	pfsync_push_all(struct pfsync_softc *);
static void	pfsyncintr(void *);
static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
		    struct in_mfilter *, struct in6_mfilter *);
static void	pfsync_multicast_cleanup(struct pfsync_softc *);
static void	pfsync_pointers_init(void);
static void	pfsync_pointers_uninit(void);
static int	pfsync_init(void);
static void	pfsync_uninit(void);

static unsigned long pfsync_buckets;

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PFSYNC");
SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
    &pfsync_buckets, 0, "Number of pfsync hash buckets");
SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)");

static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
static void	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
		    struct pf_state_peer *);
static int	pfsyncoutput(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);

static int	pfsync_defer(struct pf_kstate *, struct mbuf *);
static void	pfsync_undefer(struct pfsync_deferral *, int);
static void	pfsync_undefer_state_locked(struct pf_kstate *, int);
static void	pfsync_undefer_state(struct pf_kstate *, int);
static void	pfsync_defer_tmo(void *);

static void	pfsync_request_update(u_int32_t, u_int64_t);
static bool	pfsync_update_state_req(struct pf_kstate *);

static void	pfsync_drop(struct pfsync_softc *);
static void	pfsync_sendout(int, int);
static void	pfsync_send_plus(void *, size_t);

static void	pfsync_bulk_start(void);
static void	pfsync_bulk_status(u_int8_t);
static void	pfsync_bulk_update(void *);
static void	pfsync_bulk_fail(void *);

static void	pfsync_detach_ifnet(struct ifnet *);

static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *,
    struct pfsync_kstatus *);
static int pfsync_kstatus_to_softc(struct pfsync_kstatus *,
    struct pfsync_softc *);

#ifdef IPSEC
static void	pfsync_update_net_tdb(struct pfsync_tdb *);
#endif
static struct pfsync_bucket	*pfsync_get_bucket(struct pfsync_softc *,
		    struct pf_kstate *);

#define PFSYNC_MAX_BULKTRIES	12

VNET_DEFINE(struct if_clone *, pfsync_cloner);
#define	V_pfsync_cloner	VNET(pfsync_cloner)

const struct in6_addr in6addr_linklocal_pfsync_group =
	{{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}};
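
/*
 * Only unit 0 can be created.  The number of buckets is a boot-time
 * tunable (net.pfsync.pfsync_buckets) and defaults to twice the number
 * of CPUs.
 */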
static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	struct pfsync_bucket *b;
	int c;
	enum pfsync_q_id q;

	if (unit != 0)
		return (EINVAL);

	if (!pfsync_buckets)
		pfsync_buckets = mp_ncpus * 2;

	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
	sc->sc_flags |= PFSYNCF_OK;
	sc->sc_maxupdates = 128;
	sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;

	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if (ifp == NULL) {
		free(sc, M_PFSYNC);
		return (ENOSPC);
	}
	if_initname(ifp, pfsyncname, unit);
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = ETHERMTU;
	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);

	if_attach(ifp);

	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);

	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
	    M_PFSYNC, M_ZERO | M_WAITOK);
	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);

		b->b_id = c;
		b->b_sc = sc;
		b->b_len = PFSYNC_MINPKT;

		for (q = 0; q < PFSYNC_Q_COUNT; q++)
			TAILQ_INIT(&b->b_qs[q]);

		TAILQ_INIT(&b->b_upd_req_list);
		TAILQ_INIT(&b->b_deferrals);

		callout_init(&b->b_tmo, 1);

		b->b_snd.ifq_maxlen = ifqmaxlen;
	}

	V_pfsyncif = sc;

	return (0);
}
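
/*
 * Teardown: stop or drain every deferral callout before the softc is
 * freed.  callout_stop() > 0 means the callout was still pending and we
 * must complete the deferral ourselves; otherwise it is already running
 * and callout_drain() waits for it to finish.
 */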
static void
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct pfsync_bucket *b;
	int c, ret;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		/*
		 * At this stage, everything should have already been
		 * cleared by pfsync_uninit(), and we have only to
		 * drain callouts.
		 */
		PFSYNC_BUCKET_LOCK(b);
		while (b->b_deferred > 0) {
			struct pfsync_deferral *pd =
			    TAILQ_FIRST(&b->b_deferrals);

			ret = callout_stop(&pd->pd_tmo);
			PFSYNC_BUCKET_UNLOCK(b);
			if (ret > 0) {
				pfsync_undefer(pd, 1);
			} else {
				callout_drain(&pd->pd_tmo);
			}
			PFSYNC_BUCKET_LOCK(b);
		}
		MPASS(b->b_deferred == 0);
		MPASS(TAILQ_EMPTY(&b->b_deferrals));
		PFSYNC_BUCKET_UNLOCK(b);

		callout_drain(&b->b_tmo);
	}

	callout_drain(&sc->sc_bulkfail_tmo);
	callout_drain(&sc->sc_bulk_tmo);

	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
	bpfdetach(ifp);
	if_detach(ifp);

	pfsync_drop(sc);

	if_free(ifp);
	pfsync_multicast_cleanup(sc);
	mtx_destroy(&sc->sc_mtx);
	mtx_destroy(&sc->sc_bulk_mtx);

	free(sc->sc_buckets, M_PFSYNC);
	free(sc, M_PFSYNC);

	V_pfsyncif = NULL;
}

static int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}
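
/*
 * Import a state received over the wire (or via the ioctl bulk import
 * path) into the local state table.  The FreeBSD 13 and 14.0 message
 * formats share their leading fields, so pfs_1301 is used for the
 * common part and msg_version selects how the remainder is decoded.
 */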
static int
pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
{
	struct pfsync_softc *sc = V_pfsyncif;
#ifndef	__NO_STRICT_ALIGNMENT
	struct pfsync_state_key key[2];
#endif
	struct pfsync_state_key *kw, *ks;
	struct pf_kstate	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_krule *r = NULL;
	struct pfi_kkif	*kif;
	int error;

	PF_RULES_RASSERT();

	if (sp->pfs_1301.creatorid == 0) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: invalid creator id: %08x\n", __func__,
			    ntohl(sp->pfs_1301.creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("%s: unknown interface: %s\n", __func__,
			    sp->pfs_1301.ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
	 */
	if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)];
	else
		r = &V_pf_default_rule;

	if ((r->max_states &&
	    counter_u64_fetch(r->states_cur) >= r->max_states))
		goto cleanup;

	/*
	 * XXXGL: consider M_WAITOK in ioctl path after.
	 */
	st = pf_alloc_state(M_NOWAIT);
	if (__predict_false(st == NULL))
		goto cleanup;

	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
		goto cleanup;

#ifndef	__NO_STRICT_ALIGNMENT
	bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2);
	kw = &key[PF_SK_WIRE];
	ks = &key[PF_SK_STACK];
#else
	kw = &sp->pfs_1301.key[PF_SK_WIRE];
	ks = &sp->pfs_1301.key[PF_SK_STACK];
#endif

	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
	    kw->port[0] != ks->port[0] ||
	    kw->port[1] != ks->port[1]) {
		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
		if (sks == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst))
		goto cleanup;

	/* Copy to state key(s). */
	skw->addr[0] = kw->addr[0];
	skw->addr[1] = kw->addr[1];
	skw->port[0] = kw->port[0];
	skw->port[1] = kw->port[1];
	skw->proto = sp->pfs_1301.proto;
	skw->af = sp->pfs_1301.af;
	if (sks != skw) {
		sks->addr[0] = ks->addr[0];
		sks->addr[1] = ks->addr[1];
		sks->port[0] = ks->port[0];
		sks->port[1] = ks->port[1];
		sks->proto = sp->pfs_1301.proto;
		sks->af = sp->pfs_1301.af;
	}

	/* copy to state */
	bcopy(&sp->pfs_1301.rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_uptime - ntohl(sp->pfs_1301.creation);
	st->expire = time_uptime;
	if (sp->pfs_1301.expire) {
		uint32_t timeout;

		timeout = r->timeout[sp->pfs_1301.timeout];
		if (!timeout)
			timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout];

		/* sp->expire may have been adaptively scaled by export. */
		st->expire -= timeout - ntohl(sp->pfs_1301.expire);
	}

	st->direction = sp->pfs_1301.direction;
	st->act.log = sp->pfs_1301.log;
	st->timeout = sp->pfs_1301.timeout;
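
	/*
	 * Decode the version-specific part.  The 13.x format carries no
	 * per-state action attributes, so they are reconstructed from the
	 * matched rule where possible; the 14.0 format carries them
	 * explicitly.
	 */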
	switch (msg_version) {
	case PFSYNC_MSG_VERSION_1301:
		st->state_flags = sp->pfs_1301.state_flags;
		/*
		 * In FreeBSD 13 pfsync lacks many attributes. Copy them
		 * from the rule if possible. If the rule can't be matched,
		 * clear any set options as we can't recover their
		 * parameters.
		 */
		if (r == &V_pf_default_rule) {
			st->state_flags &= ~PFSTATE_SETMASK;
		} else {
			/*
			 * Similar to pf_rule_to_actions(). This code
			 * won't set the actions properly if they come
			 * from multiple "match" rules, as only the rule
			 * creating the state is sent over pfsync.
			 */
			st->act.qid = r->qid;
			st->act.pqid = r->pqid;
			st->act.rtableid = r->rtableid;
			if (r->scrub_flags & PFSTATE_SETTOS)
				st->act.set_tos = r->set_tos;
			st->act.min_ttl = r->min_ttl;
			st->act.max_mss = r->max_mss;
			st->state_flags |= (r->scrub_flags &
			    (PFSTATE_NODF|PFSTATE_RANDOMID|
			    PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|
			    PFSTATE_SETPRIO));
			if (r->dnpipe || r->dnrpipe) {
				if (r->free_flags & PFRULE_DN_IS_PIPE)
					st->state_flags |= PFSTATE_DN_IS_PIPE;
				else
					st->state_flags &= ~PFSTATE_DN_IS_PIPE;
			}
			st->act.dnpipe = r->dnpipe;
			st->act.dnrpipe = r->dnrpipe;
		}
		break;
	case PFSYNC_MSG_VERSION_1400:
		st->state_flags = ntohs(sp->pfs_1400.state_flags);
		st->act.qid = ntohs(sp->pfs_1400.qid);
		st->act.pqid = ntohs(sp->pfs_1400.pqid);
		st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
		st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
		st->act.rtableid = ntohl(sp->pfs_1400.rtableid);
		st->act.min_ttl = sp->pfs_1400.min_ttl;
		st->act.set_tos = sp->pfs_1400.set_tos;
		st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
		st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
		st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
		st->rt = sp->pfs_1400.rt;
		if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("%s: unknown route interface: %s\n",
				    __func__, sp->pfs_1400.rt_ifname);
			if (flags & PFSYNC_SI_IOCTL)
				error = EINVAL;
			else
				error = 0;
			goto cleanup_keys;
		}
		break;
	default:
		panic("%s: Unsupported pfsync_msg_version %d",
		    __func__, msg_version);
	}

	st->id = sp->pfs_1301.id;
	st->creatorid = sp->pfs_1301.creatorid;
	pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
	pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;

	st->pfsync_time = time_uptime;
	st->sync_state = PFSYNC_S_NONE;

	if (!(flags & PFSYNC_SI_IOCTL))
		st->state_flags |= PFSTATE_NOSYNC;

	if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
		goto cleanup_state;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	counter_u64_add(r->states_cur, 1);
	counter_u64_add(r->states_tot, 1);

	if (!(flags & PFSYNC_SI_IOCTL)) {
		st->state_flags &= ~PFSTATE_NOSYNC;
		if (st->state_flags & PFSTATE_ACK) {
			struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
			PFSYNC_BUCKET_LOCK(b);
			pfsync_q_ins(st, PFSYNC_S_IACK, true);
			PFSYNC_BUCKET_UNLOCK(b);

			pfsync_push_all(sc);
		}
	}
	st->state_flags &= ~PFSTATE_ACK;
	PF_STATE_UNLOCK(st);

	return (0);
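
	/*
	 * Error unwinding: entering at cleanup assumes an allocation
	 * failure; cleanup_keys frees whichever state keys were allocated
	 * (sks may alias skw); cleanup_state is for failures after
	 * pf_state_insert(), which consumes the keys itself.
	 */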
cleanup:
	error = ENOMEM;
cleanup_keys:
	if (skw == sks)
		sks = NULL;
	uma_zfree(V_pf_state_key_z, skw);
	uma_zfree(V_pf_state_key_z, sks);

cleanup_state:	/* pf_state_insert() frees the state keys. */
	if (st) {
		st->timeout = PFTM_UNLINKED; /* appease an assert */
		pf_free_state(st);
	}
	return (error);
}

#ifdef INET
static int
pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;

	int offset, len, flags = 0;
	int rv;
	uint16_t count;

	PF_RULES_RLOCK_TRACKER;

	*mp = NULL;
	V_pfsyncstats.pfsyncs_ipackets++;

	/* Verify that we have a sync interface configured. */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
			return (IPPROTO_DONE);
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

	len = ntohs(ph->len) + offset;
	if (m->m_pkthdr.len < len) {
		V_pfsyncstats.pfsyncs_badlen++;
		goto done;
	}

	/*
	 * Trusting pf_chksum during packet processing, as well as seeking
	 * in interface name tree, require holding PF_RULES_RLOCK().
	 */
	PF_RULES_RLOCK();
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		flags = PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	while (offset <= len - sizeof(subh)) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			PF_RULES_RUNLOCK();
			goto done;
		}

		count = ntohs(subh.count);
		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
		if (rv == -1) {
			PF_RULES_RUNLOCK();
			return (IPPROTO_DONE);
		}

		offset += rv;
	}
	PF_RULES_RUNLOCK();

done:
	m_freem(m);
	return (IPPROTO_DONE);
}
#endif
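
/*
 * IPv6 input path.  This mirrors pfsync_input() above: the same header
 * and length validation and the same subheader walk, with the IPv6 hop
 * limit standing in for the IPv4 TTL check.
 */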
#ifdef INET6
static int
pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;

	int offset, len, flags = 0;
	int rv;
	uint16_t count;

	PF_RULES_RLOCK_TRACKER;

	*mp = NULL;
	V_pfsyncstats.pfsyncs_ipackets++;

	/* Verify that we have a sync interface configured. */
	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
	/* verify that the hop limit is 255. */
	if (ip6->ip6_hlim != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = sizeof(*ip6);
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			V_pfsyncstats.pfsyncs_hdrops++;
			return (IPPROTO_DONE);
		}
		ip6 = mtod(m, struct ip6_hdr *);
	}
	ph = (struct pfsync_header *)((char *)ip6 + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

	len = ntohs(ph->len) + offset;
	if (m->m_pkthdr.len < len) {
		V_pfsyncstats.pfsyncs_badlen++;
		goto done;
	}

	/*
	 * Trusting pf_chksum during packet processing, as well as seeking
	 * in interface name tree, require holding PF_RULES_RLOCK().
	 */
	PF_RULES_RLOCK();
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		flags = PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	while (offset <= len - sizeof(subh)) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			PF_RULES_RUNLOCK();
			goto done;
		}

		count = ntohs(subh.count);
		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
		if (rv == -1) {
			PF_RULES_RUNLOCK();
			return (IPPROTO_DONE);
		}

		offset += rv;
	}
	PF_RULES_RUNLOCK();

done:
	m_freem(m);
	return (IPPROTO_DONE);
}
#endif

static int
pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_clr *clr;
	struct mbuf *mp;
	int len = sizeof(*clr) * count;
	int i, offp;
	u_int32_t creatorid;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] != '\0' &&
		    pfi_kkif_find(clr[i].ifname) == NULL)
			continue;

		for (int i = 0; i <= pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];
			struct pf_kstate *s;
relock:
			PF_HASHROW_LOCK(ih);
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->creatorid == creatorid) {
					s->state_flags |= PFSTATE_NOSYNC;
					pf_unlink_state(s);
					goto relock;
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
	}

	return (len);
}
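
/*
 * PFSYNC_ACT_INS_1301/_1400: one or more full state records to insert.
 * Each record is sanity-checked before pfsync_state_import(); a bad
 * record is counted and skipped, while an ENOMEM import aborts the
 * batch but still consumes its full length from the packet.
 */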
static int
pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct mbuf *mp;
	union pfsync_state_union *sa, *sp;
	int i, offp, len, msg_version;

	switch (action) {
	case PFSYNC_ACT_INS_1301:
		len = sizeof(struct pfsync_state_1301) * count;
		msg_version = PFSYNC_MSG_VERSION_1301;
		break;
	case PFSYNC_ACT_INS_1400:
		len = sizeof(struct pfsync_state_1400) * count;
		msg_version = PFSYNC_MSG_VERSION_1400;
		break;
	default:
		V_pfsyncstats.pfsyncs_badact++;
		return (-1);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (union pfsync_state_union *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* Check for invalid values. */
		if (sp->pfs_1301.timeout >= PFTM_MAX ||
		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST ||
		    sp->pfs_1301.direction > PF_OUT ||
		    (sp->pfs_1301.af != AF_INET &&
		    sp->pfs_1301.af != AF_INET6)) {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("%s: invalid value\n", __func__);
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, flags, msg_version) == ENOMEM)
			/* Drop out, but process the rest of the actions. */
			break;
	}

	return (len);
}

static int
pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_kstate *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		st = pf_find_state_byid(ia->id, ia->creatorid);
		if (st == NULL)
			continue;

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 0);
		}
		PF_STATE_UNLOCK(st);
	}

	return (count * sizeof(struct pfsync_ins_ack));
}
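
/*
 * Merge a TCP peer update into the local state.  Returns the number of
 * directions (0-2) in which our copy is newer than the peer's; a
 * non-zero result means the sender is stale and we should advertise
 * our version of the state.
 */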
static int
pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sync = 0;

	PF_STATE_LOCK_ASSERT(st);

	/*
	 * The state should never go backwards except
	 * for syn-proxy states. Neither should the
	 * sequence window slide backwards.
	 */
	if ((st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC)) ||

	    (st->src.state == src->state &&
	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(src, &st->src);

	if ((st->dst.state > dst->state) ||

	    (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
		sync++;
	else
		pf_state_peer_ntoh(dst, &st->dst);

	return (sync);
}

static int
pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	union pfsync_state_union *sa, *sp;
	struct pf_kstate *st;
	struct mbuf *mp;
	int sync, offp, i, len, msg_version;

	switch (action) {
	case PFSYNC_ACT_UPD_1301:
		len = sizeof(struct pfsync_state_1301) * count;
		msg_version = PFSYNC_MSG_VERSION_1301;
		break;
	case PFSYNC_ACT_UPD_1400:
		len = sizeof(struct pfsync_state_1400) * count;
		msg_version = PFSYNC_MSG_VERSION_1400;
		break;
	default:
		V_pfsyncstats.pfsyncs_badact++;
		return (-1);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (union pfsync_state_union *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->pfs_1301.timeout >= PFTM_MAX ||
		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, flags, msg_version))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 1);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forward.
			 */
			if (st->src.state > sp->pfs_1301.src.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
			if (st->dst.state > sp->pfs_1301.dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst);
			pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = sp->pfs_1301.timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push_all(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);
}
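
/*
 * PFSYNC_ACT_UPD_C: compressed update, carrying only the state id,
 * creator id, peers and timeout.  If the state is unknown here, an
 * update request is queued (always on bucket 0) asking the peer for
 * the full record.
 */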
static int
pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_upd_c *ua, *up;
	struct pf_kstate *st;
	int len = count * sizeof(*up);
	int sync;
	struct mbuf *mp;
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		st = pf_find_state_byid(up->id, up->creatorid);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
			pfsync_request_update(up->creatorid, up->id);
			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
			continue;
		}

		if (st->state_flags & PFSTATE_ACK) {
			pfsync_undefer_state(st, 1);
		}

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			sync = 0;

			/*
			 * Non-TCP protocol state machines always go
			 * forward.
			 */
			if (st->src.state > up->src.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->src, &st->src);
			if (st->dst.state > up->dst.state)
				sync++;
			else
				pf_state_peer_ntoh(&up->dst, &st->dst);
		}
		if (sync < 2) {
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = time_uptime;
			st->timeout = up->timeout;
		}
		st->pfsync_time = time_uptime;

		if (sync) {
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
			PF_STATE_UNLOCK(st);
			pfsync_push_all(sc);
			continue;
		}
		PF_STATE_UNLOCK(st);
	}

	return (len);
}

static int
pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_kstate *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		if (ur->id == 0 && ur->creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(ur->id, ur->creatorid);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (st->state_flags & PFSTATE_NOSYNC) {
				PF_STATE_UNLOCK(st);
				continue;
			}

			pfsync_update_state_req(st);
			PF_STATE_UNLOCK(st);
		}
	}

	return (len);
}

static int
pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct mbuf *mp;
	struct pfsync_del_c *sa, *sp;
	struct pf_kstate *st;
	int len = count * sizeof(*sp);
	int offp, i;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		sp = &sa[i];

		st = pf_find_state_byid(sp->id, sp->creatorid);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		st->state_flags |= PFSTATE_NOSYNC;
		pf_unlink_state(st);
	}

	return (len);
}
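
/*
 * PFSYNC_ACT_BUS: bulk update status from the peer.  A start message
 * arms the failure timeout, sized to how many packets a full state
 * table would need at the current MTU; a valid end message clears the
 * requested-update bookkeeping and lifts the CARP demotion.
 */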
static int
pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	PFSYNC_BLOCK(sc);

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0) {
		PFSYNC_BUNLOCK(sc);
		return (len);
	}

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		PFSYNC_BUNLOCK(sc);
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(union pfsync_state_union)),
		    pfsync_bulk_fail, sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			callout_stop(&sc->sc_bulkfail_tmo);
			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->sc_flags |= PFSYNCF_OK;
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
			if (V_pf_status.debug >= PF_DEBUG_MISC)
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}
	PFSYNC_BUNLOCK(sc);

	return (len);
}

static int
pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#endif

	return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
static void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	return;

bad:
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif
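
/*
 * Terminal handlers: both free the mbuf and return -1 so that the
 * input loop stops without touching the packet again.
 */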
static int
pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len)
		V_pfsyncstats.pfsyncs_badlen++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);
}

static int
pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}

static int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
	struct route *rt)
{
	m_freem(m);
	return (0);
}

/* ARGSUSED */
static int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct pfsyncreq pfsyncr;
	size_t nvbuflen;
	int error;
	int c;

	switch (cmd) {
	case SIOCSIFFLAGS:
		PFSYNC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_init();
		} else {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			PFSYNC_UNLOCK(sc);
			pfsync_pointers_uninit();
		}
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			for (c = 0; c < pfsync_buckets; c++) {
				PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
				if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
					pfsync_sendout(1, c);
				PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
			}
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		PFSYNC_LOCK(sc);
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		pfsyncr.pfsyncr_defer = sc->sc_flags;
		PFSYNC_UNLOCK(sc);
		return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
		    sizeof(pfsyncr)));

	case SIOCGETPFSYNCNV:
	    {
		nvlist_t *nvl_syncpeer;
		nvlist_t *nvl = nvlist_create(0);

		if (nvl == NULL)
			return (ENOMEM);

		if (sc->sc_sync_if)
			nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname);
		nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates);
		nvlist_add_number(nvl, "flags", sc->sc_flags);
		nvlist_add_number(nvl, "version", sc->sc_version);
		if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL)
			nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer);

		void *packed = NULL;
		packed = nvlist_pack(nvl, &nvbuflen);
		if (packed == NULL) {
			free(packed, M_NVLIST);
			nvlist_destroy(nvl);
			return (ENOMEM);
		}

		if (nvbuflen > ifr->ifr_cap_nv.buf_length) {
			ifr->ifr_cap_nv.length = nvbuflen;
			ifr->ifr_cap_nv.buffer = NULL;
			free(packed, M_NVLIST);
			nvlist_destroy(nvl);
			return (EFBIG);
		}

		ifr->ifr_cap_nv.length = nvbuflen;
		error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen);

		nvlist_destroy(nvl);
		nvlist_destroy(nvl_syncpeer);
		free(packed, M_NVLIST);
		break;
	    }
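
	/*
	 * Both setters funnel into pfsync_kstatus_to_softc(): the legacy
	 * ioctl converts its pfsyncreq first, while the nvlist variant
	 * unpacks a size-capped buffer copied in from userland.
	 */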
	case SIOCSETPFSYNC:
	    {
		struct pfsync_kstatus status;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
		    sizeof(pfsyncr))))
			return (error);

		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
		pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status);

		error = pfsync_kstatus_to_softc(&status, sc);
		return (error);
	    }
	case SIOCSETPFSYNCNV:
	    {
		struct pfsync_kstatus status;
		void *data;
		nvlist_t *nvl;

		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
			return (error);
		if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
			return (EINVAL);

		data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);

		if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
		    ifr->ifr_cap_nv.length)) != 0) {
			free(data, M_TEMP);
			return (error);
		}

		if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
			free(data, M_TEMP);
			return (EINVAL);
		}

		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
		pfsync_nvstatus_to_kstatus(nvl, &status);

		nvlist_destroy(nvl);
		free(data, M_TEMP);

		error = pfsync_kstatus_to_softc(&status, sc);
		return (error);
	    }
	default:
		return (ENOTTY);
	}

	return (0);
}

static void
pfsync_out_state_1301(struct pf_kstate *st, void *buf)
{
	union pfsync_state_union *sp = buf;

	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1301);
}

static void
pfsync_out_state_1400(struct pf_kstate *st, void *buf)
{
	union pfsync_state_union *sp = buf;

	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1400);
}

static void
pfsync_out_iack(struct pf_kstate *st, void *buf)
{
	struct pfsync_ins_ack *iack = buf;

	iack->id = st->id;
	iack->creatorid = st->creatorid;
}

static void
pfsync_out_upd_c(struct pf_kstate *st, void *buf)
{
	struct pfsync_upd_c *up = buf;

	bzero(up, sizeof(*up));
	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;
	up->timeout = st->timeout;
}

static void
pfsync_out_del_c(struct pf_kstate *st, void *buf)
{
	struct pfsync_del_c *dp = buf;

	dp->id = st->id;
	dp->creatorid = st->creatorid;
	st->state_flags |= PFSTATE_NOSYNC;
}

static void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_kstate *st, *next;
	struct pfsync_upd_req_item *ur;
	struct pfsync_bucket *b;
	int c;
	enum pfsync_q_id q;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];
		for (q = 0; q < PFSYNC_Q_COUNT; q++) {
			if (TAILQ_EMPTY(&b->b_qs[q]))
				continue;

			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
				KASSERT(st->sync_state == pfsync_qid_sstate[q],
				    ("%s: st->sync_state == q",
				    __func__));
				st->sync_state = PFSYNC_S_NONE;
				pf_release_state(st);
			}
			TAILQ_INIT(&b->b_qs[q]);
		}

		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
			free(ur, M_PFSYNC);
		}

		b->b_len = PFSYNC_MINPKT;
		b->b_plus = NULL;
	}
}
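
/*
 * Assemble and queue one pfsync datagram from bucket c: an IP or IPv6
 * header from the precomputed template, the pfsync header carrying the
 * ruleset checksum, one subheader per non-empty queue, any pending
 * update requests, an optional preformatted ("plus") region, and a
 * trailing EOF subheader.  If schedswi is set, the software interrupt
 * is scheduled to transmit the queued packet.
 */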
static void
pfsync_sendout(int schedswi, int c)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_kstate *st, *st_next;
	struct pfsync_upd_req_item *ur;
	struct pfsync_bucket *b = &sc->sc_buckets[c];
	size_t len;
	int aflen, offset, count = 0;
	enum pfsync_q_id q;

	KASSERT(sc != NULL, ("%s: null sc", __func__));
	KASSERT(b->b_len > PFSYNC_MINPKT,
	    ("%s: sc_len %zu", __func__, b->b_len));
	PFSYNC_BUCKET_LOCK_ASSERT(b);

	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
		pfsync_drop(sc);
		return;
	}

	m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		V_pfsyncstats.pfsyncs_onomem++;
		return;
	}
	m->m_data += max_linkhdr;
	bzero(m->m_data, b->b_len);

	len = b->b_len;

	/* build the ip header */
	switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
	case AF_INET:
	    {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip));
		aflen = offset = sizeof(*ip);

		len -= sizeof(union inet_template) - sizeof(struct ip);
		ip->ip_len = htons(len);
		ip_fillid(ip);
		break;
	    }
#endif
#ifdef INET6
	case AF_INET6:
	    {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6));
		aflen = offset = sizeof(*ip6);

		len -= sizeof(union inet_template) - sizeof(struct ip6_hdr);
		ip6->ip6_plen = htons(len);
		break;
	    }
#endif
	default:
		m_freem(m);
		return;
	}
	m->m_len = m->m_pkthdr.len = len;

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(len - aflen);
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);

	/* walk the queues */
	for (q = 0; q < PFSYNC_Q_COUNT; q++) {
		if (TAILQ_EMPTY(&b->b_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
			KASSERT(st->sync_state == pfsync_qid_sstate[q],
			    ("%s: st->sync_state == q",
			    __func__));
			/*
			 * XXXGL: some of write methods do unlocked reads
			 * of state data :(
			 */
			pfsync_qs[q].write(st, m->m_data + offset);
			offset += pfsync_qs[q].len;
			st->sync_state = PFSYNC_S_NONE;
			pf_release_state(st);
			count++;
		}
		TAILQ_INIT(&b->b_qs[q]);

		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
	}

	if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);
			free(ur, M_PFSYNC);
			count++;
		}

		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
	}

	/* has someone built a custom region for us to add? */
	if (b->b_plus != NULL) {
		bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
		offset += b->b_pluslen;

		b->b_plus = NULL;
	}

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;

	/* we're done, let's put it on the wire */
	if (ifp->if_bpf) {
		m->m_data += aflen;
		m->m_len = m->m_pkthdr.len = len - aflen;
		BPF_MTAP(ifp, m);
		m->m_data -= aflen;
		m->m_len = m->m_pkthdr.len = len;
	}

	if (sc->sc_sync_if == NULL) {
		b->b_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}

	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	b->b_len = PFSYNC_MINPKT;

	if (!_IF_QFULL(&b->b_snd))
		_IF_ENQUEUE(&b->b_snd, m);
	else {
		m_freem(m);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
	}
	if (schedswi)
		swi_sched(V_pfsync_swi_cookie, 0);
}

static void
pfsync_insert_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	if (st->state_flags & PFSTATE_NOSYNC)
		return;

	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		st->state_flags |= PFSTATE_NOSYNC;
		return;
	}

	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state %u", __func__, st->sync_state));

	PFSYNC_BUCKET_LOCK(b);
	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	pfsync_q_ins(st, PFSYNC_S_INS, true);
	PFSYNC_BUCKET_UNLOCK(b);

	st->sync_updates = 0;
}

static int
pfsync_defer(struct pf_kstate *st, struct mbuf *m)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;
	struct pfsync_bucket *b;

	if (m->m_flags & (M_BCAST|M_MCAST))
		return (0);

	if (sc == NULL)
		return (0);

	b = pfsync_get_bucket(sc, st);

	PFSYNC_LOCK(sc);

	if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
	    !(sc->sc_flags & PFSYNCF_DEFER)) {
		PFSYNC_UNLOCK(sc);
		return (0);
	}

	PFSYNC_BUCKET_LOCK(b);
	PFSYNC_UNLOCK(sc);

	if (b->b_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);

	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
	if (pd == NULL) {
		PFSYNC_BUCKET_UNLOCK(b);
		return (0);
	}
	b->b_deferred++;

	m->m_flags |= M_SKIP_FIREWALL;
	st->state_flags |= PFSTATE_ACK;

	pd->pd_sc = sc;
	pd->pd_st = st;
	pf_ref_state(st);
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
	callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
	callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000,
	    pfsync_defer_tmo, pd);

	pfsync_push(b);
	PFSYNC_BUCKET_UNLOCK(b);

	return (1);
}
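
/*
 * Release a deferral: unhook it, drop the PFSTATE_ACK marker, and
 * either free the held packet (drop != 0) or hand it to the send queue.
 * The caller must hold the bucket lock; pfsync_defer_tmo() below is the
 * callout flavor and unlocks the bucket itself (CALLOUT_RETURNUNLOCKED).
 */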
static void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_kstate *st = pd->pd_st;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
	b->b_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	free(pd, M_PFSYNC);
	pf_release_state(st);

	if (drop)
		m_freem(m);
	else {
		_IF_ENQUEUE(&b->b_snd, m);
		pfsync_push(b);
	}
}

static void
pfsync_defer_tmo(void *arg)
{
	struct epoch_tracker et;
	struct pfsync_deferral *pd = arg;
	struct pfsync_softc *sc = pd->pd_sc;
	struct mbuf *m = pd->pd_m;
	struct pf_kstate *st = pd->pd_st;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
	b->b_deferred--;
	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
	PFSYNC_BUCKET_UNLOCK(b);
	free(pd, M_PFSYNC);

	if (sc->sc_sync_if == NULL) {
		pf_release_state(st);
		m_freem(m);
		return;
	}

	NET_EPOCH_ENTER(et);
	CURVNET_SET(sc->sc_sync_if->if_vnet);

	pfsync_tx(sc, m);

	pf_release_state(st);

	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}

static void
pfsync_undefer_state_locked(struct pf_kstate *st, int drop)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_deferral *pd;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			if (callout_stop(&pd->pd_tmo) > 0)
				pfsync_undefer(pd, drop);

			return;
		}
	}

	panic("%s: unable to find deferred state", __func__);
}

static void
pfsync_undefer_state(struct pf_kstate *st, int drop)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK(b);
	pfsync_undefer_state_locked(st, drop);
	PFSYNC_BUCKET_UNLOCK(b);
}

static struct pfsync_bucket *
pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
{
	int c = PF_IDHASH(st) % pfsync_buckets;

	return &sc->sc_buckets[c];
}

static void
pfsync_update_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	bool sync = false, ref = true;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_BUCKET_LOCK(b);

	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state_locked(st, 0);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return;
	}

	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = true;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
		st->sync_updates = 0;
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	if (sync || (time_uptime - st->pfsync_time) < 2)
		pfsync_push(b);

	PFSYNC_BUCKET_UNLOCK(b);
}
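/*
 * Queue a PFSYNC_ACT_UPD_REQ message asking the peer for a full copy of
 * the state identified by (creatorid, id).  Requests always travel in
 * bucket 0; a request with both fields zeroed solicits a bulk update of
 * the whole state table (see pfsync_bulk_fail() and
 * pfsync_kstatus_to_softc()).
 */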
static void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = &sc->sc_buckets[0];
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	/*
	 * This code does a bit of work to prevent multiple update requests
	 * for the same state from being generated.  It searches the current
	 * subheader queue, but it doesn't look into the queue of already
	 * packed datagrams.
	 */
	TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
		if (item->ur_msg.id == id &&
		    item->ur_msg.creatorid == creatorid)
			return;

	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
	if (item == NULL)
		return; /* XXX stats */

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	if (TAILQ_EMPTY(&b->b_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(0, 0);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
	b->b_len += nlen;

	pfsync_push(b);
}

static bool
pfsync_update_state_req(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	bool ref = true, full = false;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PF_STATE_LOCK_ASSERT(st);
	PFSYNC_BUCKET_LOCK(b);

	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return (full);
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
		pfsync_push(b);
		break;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL_C:
		/* we're already handling it */
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union))
		full = true;

	PFSYNC_BUCKET_UNLOCK(b);

	return (full);
}
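/*
 * Announce that a state is going away.  A state still sitting in the
 * insert queue was never advertised, so it is simply dropped from the
 * queue; anything else is rescheduled as a compressed delete
 * (PFSYNC_S_DEL_C).
 */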
static void
pfsync_delete_state(struct pf_kstate *st)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
	bool ref = true;

	PFSYNC_BUCKET_LOCK(b);
	if (st->state_flags & PFSTATE_ACK)
		pfsync_undefer_state_locked(st, 1);
	if (st->state_flags & PFSTATE_NOSYNC) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st, true, b);
		PFSYNC_BUCKET_UNLOCK(b);
		return;
	}

	if (b->b_len == PFSYNC_MINPKT)
		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* We never got to tell the world so just forget about it. */
		pfsync_q_del(st, true, b);
		break;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st, false, b);
		ref = false;
		/* FALLTHROUGH */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL_C, ref);
		break;

	default:
		panic("%s: unexpected sync state %d", __func__, st->sync_state);
	}

	PFSYNC_BUCKET_UNLOCK(b);
}

static void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);
	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
}

static enum pfsync_q_id
pfsync_sstate_to_qid(u_int8_t sync_state)
{
	struct pfsync_softc *sc = V_pfsyncif;

	switch (sync_state) {
	case PFSYNC_S_INS:
		switch (sc->sc_version) {
		case PFSYNC_MSG_VERSION_1301:
			return PFSYNC_Q_INS_1301;
		case PFSYNC_MSG_VERSION_1400:
			return PFSYNC_Q_INS_1400;
		}
		break;
	case PFSYNC_S_IACK:
		return PFSYNC_Q_IACK;
	case PFSYNC_S_UPD:
		switch (sc->sc_version) {
		case PFSYNC_MSG_VERSION_1301:
			return PFSYNC_Q_UPD_1301;
		case PFSYNC_MSG_VERSION_1400:
			return PFSYNC_Q_UPD_1400;
		}
		break;
	case PFSYNC_S_UPD_C:
		return PFSYNC_Q_UPD_C;
	case PFSYNC_S_DEL_C:
		return PFSYNC_Q_DEL_C;
	default:
		panic("%s: Unsupported st->sync_state 0x%02x",
		    __func__, sync_state);
	}

	panic("%s: Unsupported pfsync_msg_version %d",
	    __func__, sc->sc_version);
}

static void
pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref)
{
	enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state);
	struct pfsync_softc *sc = V_pfsyncif;
	size_t nlen = pfsync_qs[q].len;
	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state %u", __func__, st->sync_state));
	KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
	    b->b_len));

	if (TAILQ_EMPTY(&b->b_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
		pfsync_sendout(1, b->b_id);

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	b->b_len += nlen;
	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
	st->sync_state = pfsync_qid_sstate[q];
	if (ref)
		pf_ref_state(st);
}

static void
pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
{
	enum pfsync_q_id q;

	PFSYNC_BUCKET_LOCK_ASSERT(b);
	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __func__));

	q = pfsync_sstate_to_qid(st->sync_state);
	b->b_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;
	if (unref)
		pf_release_state(st);

	if (TAILQ_EMPTY(&b->b_qs[q]))
		b->b_len -= sizeof(struct pfsync_subheader);
}
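/*
 * A peer asked for a bulk update: reset the bulk-send cursor and start
 * streaming the state table from the top, one MTU-sized packet per
 * pfsync_bulk_update() invocation.
 */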
printf("pfsync: received bulk update request\n"); 2429 2430 PFSYNC_BLOCK(sc); 2431 2432 sc->sc_ureq_received = time_uptime; 2433 sc->sc_bulk_hashid = 0; 2434 sc->sc_bulk_stateid = 0; 2435 pfsync_bulk_status(PFSYNC_BUS_START); 2436 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); 2437 PFSYNC_BUNLOCK(sc); 2438 } 2439 2440 static void 2441 pfsync_bulk_update(void *arg) 2442 { 2443 struct pfsync_softc *sc = arg; 2444 struct pf_kstate *s; 2445 int i; 2446 2447 PFSYNC_BLOCK_ASSERT(sc); 2448 CURVNET_SET(sc->sc_ifp->if_vnet); 2449 2450 /* 2451 * Start with last state from previous invocation. 2452 * It may had gone, in this case start from the 2453 * hash slot. 2454 */ 2455 s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid); 2456 2457 if (s != NULL) 2458 i = PF_IDHASH(s); 2459 else 2460 i = sc->sc_bulk_hashid; 2461 2462 for (; i <= pf_hashmask; i++) { 2463 struct pf_idhash *ih = &V_pf_idhash[i]; 2464 2465 if (s != NULL) 2466 PF_HASHROW_ASSERT(ih); 2467 else { 2468 PF_HASHROW_LOCK(ih); 2469 s = LIST_FIRST(&ih->states); 2470 } 2471 2472 for (; s; s = LIST_NEXT(s, entry)) { 2473 if (s->sync_state == PFSYNC_S_NONE && 2474 s->timeout < PFTM_MAX && 2475 s->pfsync_time <= sc->sc_ureq_received) { 2476 if (pfsync_update_state_req(s)) { 2477 /* We've filled a packet. */ 2478 sc->sc_bulk_hashid = i; 2479 sc->sc_bulk_stateid = s->id; 2480 sc->sc_bulk_creatorid = s->creatorid; 2481 PF_HASHROW_UNLOCK(ih); 2482 callout_reset(&sc->sc_bulk_tmo, 1, 2483 pfsync_bulk_update, sc); 2484 goto full; 2485 } 2486 } 2487 } 2488 PF_HASHROW_UNLOCK(ih); 2489 } 2490 2491 /* We're done. */ 2492 pfsync_bulk_status(PFSYNC_BUS_END); 2493 full: 2494 CURVNET_RESTORE(); 2495 } 2496 2497 static void 2498 pfsync_bulk_status(u_int8_t status) 2499 { 2500 struct { 2501 struct pfsync_subheader subh; 2502 struct pfsync_bus bus; 2503 } __packed r; 2504 2505 struct pfsync_softc *sc = V_pfsyncif; 2506 2507 bzero(&r, sizeof(r)); 2508 2509 r.subh.action = PFSYNC_ACT_BUS; 2510 r.subh.count = htons(1); 2511 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++; 2512 2513 r.bus.creatorid = V_pf_status.hostid; 2514 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); 2515 r.bus.status = status; 2516 2517 pfsync_send_plus(&r, sizeof(r)); 2518 } 2519 2520 static void 2521 pfsync_bulk_fail(void *arg) 2522 { 2523 struct pfsync_softc *sc = arg; 2524 struct pfsync_bucket *b = &sc->sc_buckets[0]; 2525 2526 CURVNET_SET(sc->sc_ifp->if_vnet); 2527 2528 PFSYNC_BLOCK_ASSERT(sc); 2529 2530 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { 2531 /* Try again */ 2532 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 2533 pfsync_bulk_fail, V_pfsyncif); 2534 PFSYNC_BUCKET_LOCK(b); 2535 pfsync_request_update(0, 0); 2536 PFSYNC_BUCKET_UNLOCK(b); 2537 } else { 2538 /* Pretend like the transfer was ok. 
static void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pfsync_bucket *b = &sc->sc_buckets[0];

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PFSYNC_BLOCK_ASSERT(sc);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
		PFSYNC_BUCKET_LOCK(b);
		pfsync_request_update(0, 0);
		PFSYNC_BUCKET_UNLOCK(b);
	} else {
		/* Pretend the transfer was OK. */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
		PFSYNC_LOCK(sc);
		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->sc_flags |= PFSYNCF_OK;
		PFSYNC_UNLOCK(sc);
		if (V_pf_status.debug >= PF_DEBUG_MISC)
			printf("pfsync: failed to receive bulk update\n");
	}

	CURVNET_RESTORE();
}

static void
pfsync_send_plus(void *plus, size_t pluslen)
{
	struct pfsync_softc *sc = V_pfsyncif;
	struct pfsync_bucket *b = &sc->sc_buckets[0];

	PFSYNC_BUCKET_LOCK(b);

	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
		pfsync_sendout(1, b->b_id);

	b->b_plus = plus;
	b->b_len += (b->b_pluslen = pluslen);

	pfsync_sendout(1, b->b_id);
	PFSYNC_BUCKET_UNLOCK(b);
}

static void
pfsync_timeout(void *arg)
{
	struct pfsync_bucket *b = arg;

	CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
	PFSYNC_BUCKET_LOCK(b);
	pfsync_push(b);
	PFSYNC_BUCKET_UNLOCK(b);
	CURVNET_RESTORE();
}

static void
pfsync_push(struct pfsync_bucket *b)
{

	PFSYNC_BUCKET_LOCK_ASSERT(b);

	b->b_flags |= PFSYNCF_BUCKET_PUSH;
	swi_sched(V_pfsync_swi_cookie, 0);
}

static void
pfsync_push_all(struct pfsync_softc *sc)
{
	int c;
	struct pfsync_bucket *b;

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];

		PFSYNC_BUCKET_LOCK(b);
		pfsync_push(b);
		PFSYNC_BUCKET_UNLOCK(b);
	}
}
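/*
 * Transmit one pfsync mbuf.  Packets carrying M_SKIP_FIREWALL are
 * deferred end-host packets being released and go out through the
 * regular output path; everything else is one of our own pfsync
 * datagrams and is sent using the multicast options configured on the
 * softc.
 */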
static void
pfsync_tx(struct pfsync_softc *sc, struct mbuf *m)
{
	struct ip *ip;
	int af, error = 0;

	ip = mtod(m, struct ip *);
	MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4));

	af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6;

	/*
	 * We distinguish between a deferral packet and our own pfsync
	 * packet based on the M_SKIP_FIREWALL flag.  This is XXX.
	 */
	switch (af) {
#ifdef INET
	case AF_INET:
		if (m->m_flags & M_SKIP_FIREWALL) {
			error = ip_output(m, NULL, NULL, 0,
			    NULL, NULL);
		} else {
			error = ip_output(m, NULL, NULL,
			    IP_RAWOUTPUT, &sc->sc_imo, NULL);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (m->m_flags & M_SKIP_FIREWALL) {
			error = ip6_output(m, NULL, NULL, 0,
			    NULL, NULL, NULL);
		} else {
			error = ip6_output(m, NULL, NULL, 0,
			    &sc->sc_im6o, NULL, NULL);
		}
		break;
#endif
	}

	if (error == 0)
		V_pfsyncstats.pfsyncs_opackets++;
	else
		V_pfsyncstats.pfsyncs_oerrors++;
}

static void
pfsyncintr(void *arg)
{
	struct epoch_tracker et;
	struct pfsync_softc *sc = arg;
	struct pfsync_bucket *b;
	struct mbuf *m, *n;
	int c;

	NET_EPOCH_ENTER(et);
	CURVNET_SET(sc->sc_ifp->if_vnet);

	for (c = 0; c < pfsync_buckets; c++) {
		b = &sc->sc_buckets[c];

		PFSYNC_BUCKET_LOCK(b);
		if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
			pfsync_sendout(0, b->b_id);
			b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
		}
		_IF_DEQUEUE_ALL(&b->b_snd, m);
		PFSYNC_BUCKET_UNLOCK(b);

		for (; m != NULL; m = n) {
			n = m->m_nextpkt;
			m->m_nextpkt = NULL;

			pfsync_tx(sc, m);
		}
	}
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}

static int
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
    struct in_mfilter *imf, struct in6_mfilter *im6f)
{
#ifdef INET
	struct ip_moptions *imo = &sc->sc_imo;
#endif
#ifdef INET6
	struct ip6_moptions *im6o = &sc->sc_im6o;
	struct sockaddr_in6 *syncpeer_sa6 = NULL;
#endif

	if (!(ifp->if_flags & IFF_MULTICAST))
		return (EADDRNOTAVAIL);

	switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
	case AF_INET:
	{
		int error;

		ip_mfilter_init(&imo->imo_head);
		imo->imo_multicast_vif = -1;
		if ((error = in_joingroup(ifp,
		    &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL,
		    &imf->imf_inm)) != 0)
			return (error);

		ip_mfilter_insert(&imo->imo_head, imf);
		imo->imo_multicast_ifp = ifp;
		imo->imo_multicast_ttl = PFSYNC_DFLTTL;
		imo->imo_multicast_loop = 0;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		int error;

		syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer;
		if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL)))
			return (error);

		ip6_mfilter_init(&im6o->im6o_head);
		if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL,
		    &(im6f->im6f_in6m), 0)) != 0)
			return (error);

		ip6_mfilter_insert(&im6o->im6o_head, im6f);
		im6o->im6o_multicast_ifp = ifp;
		im6o->im6o_multicast_hlim = PFSYNC_DFLTTL;
		im6o->im6o_multicast_loop = 0;
		break;
	}
#endif
	}

	return (0);
}
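/*
 * Undo pfsync_multicast_setup(): leave any joined groups and free the
 * associated multicast filters.
 */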
static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
#ifdef INET
	struct ip_moptions *imo = &sc->sc_imo;
	struct in_mfilter *imf;

	while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
		ip_mfilter_remove(&imo->imo_head, imf);
		in_leavegroup(imf->imf_inm, NULL);
		ip_mfilter_free(imf);
	}
	imo->imo_multicast_ifp = NULL;
#endif

#ifdef INET6
	struct ip6_moptions *im6o = &sc->sc_im6o;
	struct in6_mfilter *im6f;

	while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
		ip6_mfilter_remove(&im6o->im6o_head, im6f);
		in6_leavegroup(im6f->im6f_in6m, NULL);
		ip6_mfilter_free(im6f);
	}
	im6o->im6o_multicast_ifp = NULL;
#endif
}

void
pfsync_detach_ifnet(struct ifnet *ifp)
{
	struct pfsync_softc *sc = V_pfsyncif;

	if (sc == NULL)
		return;

	PFSYNC_LOCK(sc);

	if (sc->sc_sync_if == ifp) {
		/*
		 * We don't need multicast cleanup here, because the
		 * interface is going away.  We do need to ensure we
		 * don't try to do cleanup later.
		 */
		ip_mfilter_init(&sc->sc_imo.imo_head);
		sc->sc_imo.imo_multicast_ifp = NULL;
		sc->sc_im6o.im6o_multicast_ifp = NULL;
		sc->sc_sync_if = NULL;
	}

	PFSYNC_UNLOCK(sc);
}

static int
pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status)
{
	struct sockaddr_storage sa;

	status->maxupdates = pfsyncr->pfsyncr_maxupdates;
	status->flags = pfsyncr->pfsyncr_defer;

	strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ);

	memset(&sa, 0, sizeof(sa));
	if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) {
		struct sockaddr_in *in = (struct sockaddr_in *)&sa;

		in->sin_family = AF_INET;
		in->sin_len = sizeof(*in);
		in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr;
	}
	status->syncpeer = sa;

	return (0);
}
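/*
 * Apply a validated configuration to the softc: resolve the sync
 * interface, default the sync peer to the pfsync multicast group when
 * none was given, join that group if needed, rebuild the IPv4/IPv6
 * header template and finally request a bulk update from the peer.
 *
 * For illustration only (interface name is an example, see pfsync(4)),
 * a configuration such as
 *
 *	ifconfig pfsync0 syncdev em0 maxupd 128 defer up
 *
 * ends up here via the ioctl path.
 */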
static int
pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
{
	struct ifnet *sifp;
	struct in_mfilter *imf = NULL;
	struct in6_mfilter *im6f = NULL;
	int error;
	int c;

	if ((status->maxupdates < 0) || (status->maxupdates > 255))
		return (EINVAL);

	if (status->syncdev[0] == '\0')
		sifp = NULL;
	else if ((sifp = ifunit_ref(status->syncdev)) == NULL)
		return (EINVAL);

	switch (status->syncpeer.ss_family) {
#ifdef INET
	case AF_UNSPEC:
	case AF_INET: {
		struct sockaddr_in *status_sin;

		status_sin = (struct sockaddr_in *)&(status->syncpeer);
		if (sifp != NULL) {
			if (status_sin->sin_addr.s_addr == 0 ||
			    status_sin->sin_addr.s_addr ==
			    htonl(INADDR_PFSYNC_GROUP)) {
				status_sin->sin_family = AF_INET;
				status_sin->sin_len = sizeof(*status_sin);
				status_sin->sin_addr.s_addr =
				    htonl(INADDR_PFSYNC_GROUP);
			}

			if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) {
				imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
			}
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *status_sin6;

		status_sin6 = (struct sockaddr_in6 *)&(status->syncpeer);
		if (sifp != NULL) {
			if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) ||
			    IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr,
			    &in6addr_linklocal_pfsync_group)) {
				status_sin6->sin6_family = AF_INET6;
				status_sin6->sin6_len = sizeof(*status_sin6);
				status_sin6->sin6_addr =
				    in6addr_linklocal_pfsync_group;
			}

			if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) {
				im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0);
			}
		}
		break;
	}
#endif
	}

	PFSYNC_LOCK(sc);

	switch (status->version) {
	case PFSYNC_MSG_VERSION_UNSPECIFIED:
		sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
		break;
	case PFSYNC_MSG_VERSION_1301:
	case PFSYNC_MSG_VERSION_1400:
		sc->sc_version = status->version;
		break;
	default:
		PFSYNC_UNLOCK(sc);
		return (EINVAL);
	}

	switch (status->syncpeer.ss_family) {
	case AF_INET: {
		struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer);
		struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer;

		sc_sin->sin_family = AF_INET;
		sc_sin->sin_len = sizeof(*sc_sin);
		if (status_sin->sin_addr.s_addr == 0) {
			sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
		} else {
			sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr;
		}
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer);
		struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer;

		sc_sin->sin6_family = AF_INET6;
		sc_sin->sin6_len = sizeof(*sc_sin);
		if (IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) {
			sc_sin->sin6_addr = in6addr_linklocal_pfsync_group;
		} else {
			sc_sin->sin6_addr = status_sin->sin6_addr;
		}
		break;
	}
	}

	sc->sc_maxupdates = status->maxupdates;
	if (status->flags & PFSYNCF_DEFER) {
		sc->sc_flags |= PFSYNCF_DEFER;
		V_pfsync_defer_ptr = pfsync_defer;
	} else {
		sc->sc_flags &= ~PFSYNCF_DEFER;
		V_pfsync_defer_ptr = NULL;
	}

	if (sifp == NULL) {
		if (sc->sc_sync_if)
			if_rele(sc->sc_sync_if);
		sc->sc_sync_if = NULL;
		pfsync_multicast_cleanup(sc);
		PFSYNC_UNLOCK(sc);
		return (0);
	}

	for (c = 0; c < pfsync_buckets; c++) {
		PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
		if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
			pfsync_sendout(1, c);
		PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
	}

	pfsync_multicast_cleanup(sc);

	if (((sc->sc_sync_peer.ss_family == AF_INET) &&
	    IN_MULTICAST(ntohl(((struct sockaddr_in *)
	    &sc->sc_sync_peer)->sin_addr.s_addr))) ||
	    ((sc->sc_sync_peer.ss_family == AF_INET6) &&
	    IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)
	    &sc->sc_sync_peer)->sin6_addr))) {
		error = pfsync_multicast_setup(sc, sifp, imf, im6f);
		if (error) {
			if_rele(sifp);
			PFSYNC_UNLOCK(sc);
#ifdef INET
			if (imf != NULL)
				ip_mfilter_free(imf);
#endif
#ifdef INET6
			if (im6f != NULL)
				ip6_mfilter_free(im6f);
#endif
			return (error);
		}
	}
	if (sc->sc_sync_if)
		if_rele(sc->sc_sync_if);
	sc->sc_sync_if = sifp;
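	/*
	 * Prebuild the header template for the configured address family.
	 * pfsync_sendout() copies this template to the front of every
	 * datagram and then only fills in the variable fields (length,
	 * IPv4 id).
	 */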
	switch (sc->sc_sync_peer.ss_family) {
#ifdef INET
	case AF_INET: {
		struct ip *ip;

		ip = &sc->sc_template.ipv4;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later. */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *ip6;

		ip6 = &sc->sc_template.ipv6;
		bzero(ip6, sizeof(*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_hlim = PFSYNC_DFLTTL;
		ip6->ip6_nxt = IPPROTO_PFSYNC;
		ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr;

		struct epoch_tracker et;
		NET_EPOCH_ENTER(et);
		in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0,
		    sc->sc_sync_if, &ip6->ip6_src, NULL);
		NET_EPOCH_EXIT(et);
		break;
	}
#endif
	}

	/* Request a full state table update. */
	if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
		(*carp_demote_adj_p)(V_pfsync_carp_adj,
		    "pfsync bulk start");
	sc->sc_flags &= ~PFSYNCF_OK;
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("pfsync: requesting bulk update\n");
	PFSYNC_UNLOCK(sc);
	PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
	pfsync_request_update(0, 0);
	PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
	PFSYNC_BLOCK(sc);
	sc->sc_ureq_sent = time_uptime;
	callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc);
	PFSYNC_BUNLOCK(sc);

	return (0);
}

static void
pfsync_pointers_init(void)
{

	PF_RULES_WLOCK();
	V_pfsync_state_import_ptr = pfsync_state_import;
	V_pfsync_insert_state_ptr = pfsync_insert_state;
	V_pfsync_update_state_ptr = pfsync_update_state;
	V_pfsync_delete_state_ptr = pfsync_delete_state;
	V_pfsync_clear_states_ptr = pfsync_clear_states;
	V_pfsync_defer_ptr = pfsync_defer;
	PF_RULES_WUNLOCK();
}

static void
pfsync_pointers_uninit(void)
{

	PF_RULES_WLOCK();
	V_pfsync_state_import_ptr = NULL;
	V_pfsync_insert_state_ptr = NULL;
	V_pfsync_update_state_ptr = NULL;
	V_pfsync_delete_state_ptr = NULL;
	V_pfsync_clear_states_ptr = NULL;
	V_pfsync_defer_ptr = NULL;
	PF_RULES_WUNLOCK();
}

static void
vnet_pfsync_init(const void *unused __unused)
{
	int error;

	V_pfsync_cloner = if_clone_simple(pfsyncname,
	    pfsync_clone_create, pfsync_clone_destroy, 1);
	error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
	    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
	if (error) {
		if_clone_detach(V_pfsync_cloner);
		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
	}

	pfsync_pointers_init();
}
VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
    vnet_pfsync_init, NULL);

static void
vnet_pfsync_uninit(const void *unused __unused)
{
	int ret __diagused;

	pfsync_pointers_uninit();

	if_clone_detach(V_pfsync_cloner);
	ret = swi_remove(V_pfsync_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pfsync_swi_ie);
	MPASS(ret == 0);
}

VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
    vnet_pfsync_uninit, NULL);
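/*
 * Module-wide (as opposed to per-VNET) initialization: hook the pfsync
 * input routines into the IPv4 and IPv6 protocol switch.
 */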
static int
pfsync_init(void)
{
	int error;

	pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;

#ifdef INET
	error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
	if (error)
		return (error);
#endif
#ifdef INET6
	error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL);
	if (error) {
		ipproto_unregister(IPPROTO_PFSYNC);
		return (error);
	}
#endif

	return (0);
}

static void
pfsync_uninit(void)
{
	pfsync_detach_ifnet_ptr = NULL;

#ifdef INET
	ipproto_unregister(IPPROTO_PFSYNC);
#endif
#ifdef INET6
	ip6proto_unregister(IPPROTO_PFSYNC);
#endif
}

static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_UNLOAD:
		pfsync_uninit();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pfsync_mod = {
	pfsyncname,
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

/* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);