1 /*- 2 * SPDX-License-Identifier: (BSD-2-Clause AND ISC) 3 * 4 * Copyright (c) 2002 Michael Shalayeff 5 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /*- 31 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 32 * 33 * Permission to use, copy, modify, and distribute this software for any 34 * purpose with or without fee is hereby granted, provided that the above 35 * copyright notice and this permission notice appear in all copies. 36 * 37 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 38 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 39 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 40 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 41 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 42 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 43 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 44 */ 45 46 /* 47 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ 48 * 49 * Revisions picked from OpenBSD after revision 1.110 import: 50 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input() 51 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates 52 * 1.120, 1.175 - use monotonic time_uptime 53 * 1.122 - reduce number of updates for non-TCP sessions 54 * 1.125, 1.127 - rewrite merge or stale processing 55 * 1.128 - cleanups 56 * 1.146 - bzero() mbuf before sparsely filling it with data 57 * 1.170 - SIOCSIFMTU checks 58 * 1.126, 1.142 - deferred packets processing 59 * 1.173 - correct expire time processing 60 */ 61 62 #include <sys/cdefs.h> 63 #include "opt_inet.h" 64 #include "opt_inet6.h" 65 #include "opt_pf.h" 66 67 #include <sys/param.h> 68 #include <sys/bus.h> 69 #include <sys/endian.h> 70 #include <sys/interrupt.h> 71 #include <sys/kernel.h> 72 #include <sys/lock.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/mutex.h> 76 #include <sys/nv.h> 77 #include <sys/priv.h> 78 #include <sys/smp.h> 79 #include <sys/socket.h> 80 #include <sys/sockio.h> 81 #include <sys/sysctl.h> 82 #include <sys/syslog.h> 83 84 #include <net/bpf.h> 85 #include <net/if.h> 86 #include <net/if_var.h> 87 #include <net/if_clone.h> 88 #include <net/if_private.h> 89 #include <net/if_types.h> 90 #include <net/vnet.h> 91 #include <net/pfvar.h> 92 #include <net/route.h> 93 #include <net/if_pfsync.h> 94 95 #include <netinet/if_ether.h> 96 #include <netinet/in.h> 97 #include <netinet/in_var.h> 98 #include <netinet6/in6_var.h> 99 #include <netinet/ip.h> 100 #include <netinet/ip6.h> 101 #include <netinet/ip_carp.h> 102 #include <netinet/ip_var.h> 103 #include <netinet/tcp.h> 104 #include <netinet/tcp_fsm.h> 105 #include <netinet/tcp_seq.h> 106 107 #include <netinet/ip6.h> 108 #include <netinet6/ip6_var.h> 109 #include <netinet6/scope6_var.h> 110 111 #include <netpfil/pf/pfsync_nv.h> 112 113 struct pfsync_bucket; 114 struct pfsync_softc; 115 116 union inet_template { 117 struct ip ipv4; 118 struct ip6_hdr ipv6; 119 }; 120 121 #define PFSYNC_MINPKT ( \ 122 sizeof(union inet_template) + \ 123 sizeof(struct pfsync_header) + \ 124 sizeof(struct pfsync_subheader) ) 125 126 static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *, 127 struct pfsync_state_peer *); 128 static int pfsync_in_clr(struct mbuf *, int, int, int, int); 129 static int pfsync_in_ins(struct mbuf *, int, int, int, int); 130 static int pfsync_in_iack(struct mbuf *, int, int, int, int); 131 static int pfsync_in_upd(struct mbuf *, int, int, int, int); 132 static int pfsync_in_upd_c(struct mbuf *, int, int, int, int); 133 static int pfsync_in_ureq(struct mbuf *, int, int, int, int); 134 static int pfsync_in_del_c(struct mbuf *, int, int, int, int); 135 static int pfsync_in_bus(struct mbuf *, int, int, int, int); 136 static int pfsync_in_tdb(struct mbuf *, int, int, int, int); 137 static int pfsync_in_eof(struct mbuf *, int, int, int, int); 138 static int pfsync_in_error(struct mbuf *, int, int, int, int); 139 140 static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = { 141 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 142 pfsync_in_ins, /* PFSYNC_ACT_INS_1301 */ 143 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 144 pfsync_in_upd, /* PFSYNC_ACT_UPD_1301 */ 145 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ 146 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ 147 pfsync_in_error, /* PFSYNC_ACT_DEL */ 148 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ 149 pfsync_in_error, /* PFSYNC_ACT_INS_F */ 150 pfsync_in_error, /* PFSYNC_ACT_DEL_F */ 151 pfsync_in_bus, /* PFSYNC_ACT_BUS */ 152 pfsync_in_tdb, /* PFSYNC_ACT_TDB */ 153 pfsync_in_eof, /* PFSYNC_ACT_EOF */ 154 pfsync_in_ins, /* PFSYNC_ACT_INS_1400 */ 155 pfsync_in_upd, /* PFSYNC_ACT_UPD_1400 */ 156 }; 157 158 struct pfsync_q { 159 void (*write)(struct pf_kstate *, void *); 160 size_t len; 161 u_int8_t action; 162 }; 163 164 /* We have the following sync queues */ 165 enum pfsync_q_id { 166 PFSYNC_Q_INS_1301, 167 PFSYNC_Q_INS_1400, 168 PFSYNC_Q_IACK, 169 PFSYNC_Q_UPD_1301, 170 PFSYNC_Q_UPD_1400, 171 PFSYNC_Q_UPD_C, 172 PFSYNC_Q_DEL_C, 173 PFSYNC_Q_COUNT, 174 }; 175 176 /* Functions for building messages for given queue */ 177 static void pfsync_out_state_1301(struct pf_kstate *, void *); 178 static void pfsync_out_state_1400(struct pf_kstate *, void *); 179 static void pfsync_out_iack(struct pf_kstate *, void *); 180 static void pfsync_out_upd_c(struct pf_kstate *, void *); 181 static void pfsync_out_del_c(struct pf_kstate *, void *); 182 183 /* Attach those functions to queue */ 184 static struct pfsync_q pfsync_qs[] = { 185 { pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 }, 186 { pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 }, 187 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, 188 { pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 }, 189 { pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 }, 190 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, 191 { pfsync_out_del_c, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } 192 }; 193 194 /* Map queue to pf_kstate->sync_state */ 195 static u_int8_t pfsync_qid_sstate[] = { 196 PFSYNC_S_INS, /* PFSYNC_Q_INS_1301 */ 197 PFSYNC_S_INS, /* PFSYNC_Q_INS_1400 */ 198 PFSYNC_S_IACK, /* PFSYNC_Q_IACK */ 199 PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1301 */ 200 PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1400 */ 201 PFSYNC_S_UPD_C, /* PFSYNC_Q_UPD_C */ 202 PFSYNC_S_DEL_C, /* PFSYNC_Q_DEL_C */ 203 }; 204 205 /* Map pf_kstate->sync_state to queue */ 206 static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t); 207 208 static void pfsync_q_ins(struct pf_kstate *, int sync_state, bool); 209 static void pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *); 210 211 static void pfsync_update_state(struct pf_kstate *); 212 static void pfsync_tx(struct pfsync_softc *, struct mbuf *); 213 214 struct pfsync_upd_req_item { 215 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; 216 struct pfsync_upd_req ur_msg; 217 }; 218 219 struct pfsync_deferral { 220 struct pfsync_softc *pd_sc; 221 TAILQ_ENTRY(pfsync_deferral) pd_entry; 222 struct callout pd_tmo; 223 224 struct pf_kstate *pd_st; 225 struct mbuf *pd_m; 226 }; 227 228 struct pfsync_bucket 229 { 230 int b_id; 231 struct pfsync_softc *b_sc; 232 struct mtx b_mtx; 233 struct callout b_tmo; 234 int b_flags; 235 #define PFSYNCF_BUCKET_PUSH 0x00000001 236 237 size_t b_len; 238 TAILQ_HEAD(, pf_kstate) b_qs[PFSYNC_Q_COUNT]; 239 TAILQ_HEAD(, pfsync_upd_req_item) b_upd_req_list; 240 TAILQ_HEAD(, pfsync_deferral) b_deferrals; 241 u_int b_deferred; 242 void *b_plus; 243 size_t b_pluslen; 244 245 struct ifaltq b_snd; 246 }; 247 248 struct pfsync_softc { 249 /* Configuration */ 250 struct ifnet *sc_ifp; 251 struct ifnet *sc_sync_if; 252 struct ip_moptions sc_imo; 253 struct ip6_moptions sc_im6o; 254 struct sockaddr_storage sc_sync_peer; 255 uint32_t sc_flags; 256 uint8_t sc_maxupdates; 257 union inet_template sc_template; 258 struct mtx sc_mtx; 259 uint32_t sc_version; 260 261 /* Queued data */ 262 struct pfsync_bucket *sc_buckets; 263 264 /* Bulk update info */ 265 struct mtx sc_bulk_mtx; 266 uint32_t sc_ureq_sent; 267 int sc_bulk_tries; 268 uint32_t sc_ureq_received; 269 int sc_bulk_hashid; 270 uint64_t sc_bulk_stateid; 271 uint32_t sc_bulk_creatorid; 272 struct callout sc_bulk_tmo; 273 struct callout sc_bulkfail_tmo; 274 }; 275 276 #define PFSYNC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) 277 #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) 278 #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) 279 280 #define PFSYNC_BUCKET_LOCK(b) mtx_lock(&(b)->b_mtx) 281 #define PFSYNC_BUCKET_UNLOCK(b) mtx_unlock(&(b)->b_mtx) 282 #define PFSYNC_BUCKET_LOCK_ASSERT(b) mtx_assert(&(b)->b_mtx, MA_OWNED) 283 284 #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx) 285 #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx) 286 #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED) 287 288 #define PFSYNC_DEFER_TIMEOUT 20 289 290 static const char pfsyncname[] = "pfsync"; 291 static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data"); 292 VNET_DEFINE_STATIC(struct pfsync_softc *, pfsyncif) = NULL; 293 #define V_pfsyncif VNET(pfsyncif) 294 VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL; 295 #define V_pfsync_swi_cookie VNET(pfsync_swi_cookie) 296 VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie); 297 #define V_pfsync_swi_ie VNET(pfsync_swi_ie) 298 VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats); 299 #define V_pfsyncstats VNET(pfsyncstats) 300 VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW; 301 #define V_pfsync_carp_adj VNET(pfsync_carp_adj) 302 VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT; 303 #define V_pfsync_defer_timeout VNET(pfsync_defer_timeout) 304 305 static void pfsync_timeout(void *); 306 static void pfsync_push(struct pfsync_bucket *); 307 static void pfsync_push_all(struct pfsync_softc *); 308 static void pfsyncintr(void *); 309 static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *, 310 struct in_mfilter *, struct in6_mfilter *); 311 static void pfsync_multicast_cleanup(struct pfsync_softc *); 312 static void pfsync_pointers_init(void); 313 static void pfsync_pointers_uninit(void); 314 static int pfsync_init(void); 315 static void pfsync_uninit(void); 316 317 static unsigned long pfsync_buckets; 318 319 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 320 "PFSYNC"); 321 SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW, 322 &VNET_NAME(pfsyncstats), pfsyncstats, 323 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); 324 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW, 325 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); 326 SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN, 327 &pfsync_buckets, 0, "Number of pfsync hash buckets"); 328 SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW, 329 &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)"); 330 331 static int pfsync_clone_create(struct if_clone *, int, caddr_t); 332 static void pfsync_clone_destroy(struct ifnet *); 333 static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, 334 struct pf_state_peer *); 335 static int pfsyncoutput(struct ifnet *, struct mbuf *, 336 const struct sockaddr *, struct route *); 337 static int pfsyncioctl(struct ifnet *, u_long, caddr_t); 338 339 static int pfsync_defer(struct pf_kstate *, struct mbuf *); 340 static void pfsync_undefer(struct pfsync_deferral *, int); 341 static void pfsync_undefer_state_locked(struct pf_kstate *, int); 342 static void pfsync_undefer_state(struct pf_kstate *, int); 343 static void pfsync_defer_tmo(void *); 344 345 static void pfsync_request_update(u_int32_t, u_int64_t); 346 static bool pfsync_update_state_req(struct pf_kstate *); 347 348 static void pfsync_drop(struct pfsync_softc *); 349 static void pfsync_sendout(int, int); 350 static void pfsync_send_plus(void *, size_t); 351 352 static void pfsync_bulk_start(void); 353 static void pfsync_bulk_status(u_int8_t); 354 static void pfsync_bulk_update(void *); 355 static void pfsync_bulk_fail(void *); 356 357 static void pfsync_detach_ifnet(struct ifnet *); 358 359 static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *, 360 struct pfsync_kstatus *); 361 static int pfsync_kstatus_to_softc(struct pfsync_kstatus *, 362 struct pfsync_softc *); 363 364 #ifdef IPSEC 365 static void pfsync_update_net_tdb(struct pfsync_tdb *); 366 #endif 367 static struct pfsync_bucket *pfsync_get_bucket(struct pfsync_softc *, 368 struct pf_kstate *); 369 370 #define PFSYNC_MAX_BULKTRIES 12 371 372 VNET_DEFINE(struct if_clone *, pfsync_cloner); 373 #define V_pfsync_cloner VNET(pfsync_cloner) 374 375 const struct in6_addr in6addr_linklocal_pfsync_group = 376 {{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}}; 378 static int 379 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) 380 { 381 struct pfsync_softc *sc; 382 struct ifnet *ifp; 383 struct pfsync_bucket *b; 384 int c; 385 enum pfsync_q_id q; 386 387 if (unit != 0) 388 return (EINVAL); 389 390 if (! pfsync_buckets) 391 pfsync_buckets = mp_ncpus * 2; 392 393 sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO); 394 sc->sc_flags |= PFSYNCF_OK; 395 sc->sc_maxupdates = 128; 396 sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT; 397 398 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); 399 if (ifp == NULL) { 400 free(sc, M_PFSYNC); 401 return (ENOSPC); 402 } 403 if_initname(ifp, pfsyncname, unit); 404 ifp->if_softc = sc; 405 ifp->if_ioctl = pfsyncioctl; 406 ifp->if_output = pfsyncoutput; 407 ifp->if_type = IFT_PFSYNC; 408 ifp->if_hdrlen = sizeof(struct pfsync_header); 409 ifp->if_mtu = ETHERMTU; 410 mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF); 411 mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF); 412 callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0); 413 callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0); 414 415 if_attach(ifp); 416 417 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 418 419 sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets), 420 M_PFSYNC, M_ZERO | M_WAITOK); 421 for (c = 0; c < pfsync_buckets; c++) { 422 b = &sc->sc_buckets[c]; 423 mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF); 424 425 b->b_id = c; 426 b->b_sc = sc; 427 b->b_len = PFSYNC_MINPKT; 428 429 for (q = 0; q < PFSYNC_Q_COUNT; q++) 430 TAILQ_INIT(&b->b_qs[q]); 431 432 TAILQ_INIT(&b->b_upd_req_list); 433 TAILQ_INIT(&b->b_deferrals); 434 435 callout_init(&b->b_tmo, 1); 436 437 b->b_snd.ifq_maxlen = ifqmaxlen; 438 } 439 440 V_pfsyncif = sc; 441 442 return (0); 443 } 444 445 static void 446 pfsync_clone_destroy(struct ifnet *ifp) 447 { 448 struct pfsync_softc *sc = ifp->if_softc; 449 struct pfsync_bucket *b; 450 int c, ret; 451 452 for (c = 0; c < pfsync_buckets; c++) { 453 b = &sc->sc_buckets[c]; 454 /* 455 * At this stage, everything should have already been 456 * cleared by pfsync_uninit(), and we have only to 457 * drain callouts. 458 */ 459 PFSYNC_BUCKET_LOCK(b); 460 while (b->b_deferred > 0) { 461 struct pfsync_deferral *pd = 462 TAILQ_FIRST(&b->b_deferrals); 463 464 ret = callout_stop(&pd->pd_tmo); 465 PFSYNC_BUCKET_UNLOCK(b); 466 if (ret > 0) { 467 pfsync_undefer(pd, 1); 468 } else { 469 callout_drain(&pd->pd_tmo); 470 } 471 PFSYNC_BUCKET_LOCK(b); 472 } 473 MPASS(b->b_deferred == 0); 474 MPASS(TAILQ_EMPTY(&b->b_deferrals)); 475 PFSYNC_BUCKET_UNLOCK(b); 476 477 callout_drain(&b->b_tmo); 478 } 479 480 callout_drain(&sc->sc_bulkfail_tmo); 481 callout_drain(&sc->sc_bulk_tmo); 482 483 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 484 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); 485 bpfdetach(ifp); 486 if_detach(ifp); 487 488 pfsync_drop(sc); 489 490 if_free(ifp); 491 pfsync_multicast_cleanup(sc); 492 mtx_destroy(&sc->sc_mtx); 493 mtx_destroy(&sc->sc_bulk_mtx); 494 495 free(sc->sc_buckets, M_PFSYNC); 496 free(sc, M_PFSYNC); 497 498 V_pfsyncif = NULL; 499 } 500 501 static int 502 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 503 struct pf_state_peer *d) 504 { 505 if (s->scrub.scrub_flag && d->scrub == NULL) { 506 d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); 507 if (d->scrub == NULL) 508 return (ENOMEM); 509 } 510 511 return (0); 512 } 513 514 static int 515 pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version) 516 { 517 struct pfsync_softc *sc = V_pfsyncif; 518 #ifndef __NO_STRICT_ALIGNMENT 519 struct pfsync_state_key key[2]; 520 #endif 521 struct pfsync_state_key *kw, *ks; 522 struct pf_kstate *st = NULL; 523 struct pf_state_key *skw = NULL, *sks = NULL; 524 struct pf_krule *r = NULL; 525 struct pfi_kkif *kif; 526 int error; 527 528 PF_RULES_RASSERT(); 529 530 if (sp->pfs_1301.creatorid == 0) { 531 if (V_pf_status.debug >= PF_DEBUG_MISC) 532 printf("%s: invalid creator id: %08x\n", __func__, 533 ntohl(sp->pfs_1301.creatorid)); 534 return (EINVAL); 535 } 536 537 if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) { 538 if (V_pf_status.debug >= PF_DEBUG_MISC) 539 printf("%s: unknown interface: %s\n", __func__, 540 sp->pfs_1301.ifname); 541 if (flags & PFSYNC_SI_IOCTL) 542 return (EINVAL); 543 return (0); /* skip this state */ 544 } 545 546 /* 547 * If the ruleset checksums match or the state is coming from the ioctl, 548 * it's safe to associate the state with the rule of that number. 549 */ 550 if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) && 551 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) < 552 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 553 r = pf_main_ruleset.rules[ 554 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)]; 555 else 556 r = &V_pf_default_rule; 557 558 if ((r->max_states && 559 counter_u64_fetch(r->states_cur) >= r->max_states)) 560 goto cleanup; 561 562 /* 563 * XXXGL: consider M_WAITOK in ioctl path after. 564 */ 565 st = pf_alloc_state(M_NOWAIT); 566 if (__predict_false(st == NULL)) 567 goto cleanup; 568 569 if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL) 570 goto cleanup; 571 572 #ifndef __NO_STRICT_ALIGNMENT 573 bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2); 574 kw = &key[PF_SK_WIRE]; 575 ks = &key[PF_SK_STACK]; 576 #else 577 kw = &sp->pfs_1301.key[PF_SK_WIRE]; 578 ks = &sp->pfs_1301.key[PF_SK_STACK]; 579 #endif 580 581 if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) || 582 PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) || 583 kw->port[0] != ks->port[0] || 584 kw->port[1] != ks->port[1]) { 585 sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 586 if (sks == NULL) 587 goto cleanup; 588 } else 589 sks = skw; 590 591 /* allocate memory for scrub info */ 592 if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) || 593 pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst)) 594 goto cleanup; 595 596 /* Copy to state key(s). */ 597 skw->addr[0] = kw->addr[0]; 598 skw->addr[1] = kw->addr[1]; 599 skw->port[0] = kw->port[0]; 600 skw->port[1] = kw->port[1]; 601 skw->proto = sp->pfs_1301.proto; 602 skw->af = sp->pfs_1301.af; 603 if (sks != skw) { 604 sks->addr[0] = ks->addr[0]; 605 sks->addr[1] = ks->addr[1]; 606 sks->port[0] = ks->port[0]; 607 sks->port[1] = ks->port[1]; 608 sks->proto = sp->pfs_1301.proto; 609 sks->af = sp->pfs_1301.af; 610 } 611 612 /* copy to state */ 613 bcopy(&sp->pfs_1301.rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 614 st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000; 615 st->expire = pf_get_uptime(); 616 if (sp->pfs_1301.expire) { 617 uint32_t timeout; 618 619 timeout = r->timeout[sp->pfs_1301.timeout]; 620 if (!timeout) 621 timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout]; 622 623 /* sp->expire may have been adaptively scaled by export. */ 624 st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000; 625 } 626 627 st->direction = sp->pfs_1301.direction; 628 st->act.log = sp->pfs_1301.log; 629 st->timeout = sp->pfs_1301.timeout; 630 631 switch (msg_version) { 632 case PFSYNC_MSG_VERSION_1301: 633 st->state_flags = sp->pfs_1301.state_flags; 634 /* 635 * In FreeBSD 13 pfsync lacks many attributes. Copy them 636 * from the rule if possible. If rule can't be matched 637 * clear any set options as we can't recover their 638 * parameters. 639 */ 640 if (r == &V_pf_default_rule) { 641 st->state_flags &= ~PFSTATE_SETMASK; 642 } else { 643 /* 644 * Similar to pf_rule_to_actions(). This code 645 * won't set the actions properly if they come 646 * from multiple "match" rules as only rule 647 * creating the state is send over pfsync. 648 */ 649 st->act.qid = r->qid; 650 st->act.pqid = r->pqid; 651 st->act.rtableid = r->rtableid; 652 if (r->scrub_flags & PFSTATE_SETTOS) 653 st->act.set_tos = r->set_tos; 654 st->act.min_ttl = r->min_ttl; 655 st->act.max_mss = r->max_mss; 656 st->state_flags |= (r->scrub_flags & 657 (PFSTATE_NODF|PFSTATE_RANDOMID| 658 PFSTATE_SETTOS|PFSTATE_SCRUB_TCP| 659 PFSTATE_SETPRIO)); 660 if (r->dnpipe || r->dnrpipe) { 661 if (r->free_flags & PFRULE_DN_IS_PIPE) 662 st->state_flags |= PFSTATE_DN_IS_PIPE; 663 else 664 st->state_flags &= ~PFSTATE_DN_IS_PIPE; 665 } 666 st->act.dnpipe = r->dnpipe; 667 st->act.dnrpipe = r->dnrpipe; 668 } 669 break; 670 case PFSYNC_MSG_VERSION_1400: 671 st->state_flags = ntohs(sp->pfs_1400.state_flags); 672 st->act.qid = ntohs(sp->pfs_1400.qid); 673 st->act.pqid = ntohs(sp->pfs_1400.pqid); 674 st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe); 675 st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe); 676 st->act.rtableid = ntohl(sp->pfs_1400.rtableid); 677 st->act.min_ttl = sp->pfs_1400.min_ttl; 678 st->act.set_tos = sp->pfs_1400.set_tos; 679 st->act.max_mss = ntohs(sp->pfs_1400.max_mss); 680 st->act.set_prio[0] = sp->pfs_1400.set_prio[0]; 681 st->act.set_prio[1] = sp->pfs_1400.set_prio[1]; 682 st->rt = sp->pfs_1400.rt; 683 if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) { 684 if (V_pf_status.debug >= PF_DEBUG_MISC) 685 printf("%s: unknown route interface: %s\n", 686 __func__, sp->pfs_1400.rt_ifname); 687 if (flags & PFSYNC_SI_IOCTL) 688 error = EINVAL; 689 else 690 error = 0; 691 goto cleanup_keys; 692 } 693 break; 694 default: 695 panic("%s: Unsupported pfsync_msg_version %d", 696 __func__, msg_version); 697 } 698 699 st->id = sp->pfs_1301.id; 700 st->creatorid = sp->pfs_1301.creatorid; 701 pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src); 702 pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); 703 704 st->rule.ptr = r; 705 st->nat_rule.ptr = NULL; 706 st->anchor.ptr = NULL; 707 708 st->pfsync_time = time_uptime; 709 st->sync_state = PFSYNC_S_NONE; 710 711 if (!(flags & PFSYNC_SI_IOCTL)) 712 st->state_flags |= PFSTATE_NOSYNC; 713 714 if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0) 715 goto cleanup_state; 716 717 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 718 counter_u64_add(r->states_cur, 1); 719 counter_u64_add(r->states_tot, 1); 720 721 if (!(flags & PFSYNC_SI_IOCTL)) { 722 st->state_flags &= ~PFSTATE_NOSYNC; 723 if (st->state_flags & PFSTATE_ACK) { 724 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 725 PFSYNC_BUCKET_LOCK(b); 726 pfsync_q_ins(st, PFSYNC_S_IACK, true); 727 PFSYNC_BUCKET_UNLOCK(b); 728 729 pfsync_push_all(sc); 730 } 731 } 732 st->state_flags &= ~PFSTATE_ACK; 733 PF_STATE_UNLOCK(st); 734 735 return (0); 736 737 cleanup: 738 error = ENOMEM; 739 cleanup_keys: 740 if (skw == sks) 741 sks = NULL; 742 uma_zfree(V_pf_state_key_z, skw); 743 uma_zfree(V_pf_state_key_z, sks); 744 745 cleanup_state: /* pf_state_insert() frees the state keys. */ 746 if (st) { 747 st->timeout = PFTM_UNLINKED; /* appease an assert */ 748 pf_free_state(st); 749 } 750 return (error); 751 } 752 753 #ifdef INET 754 static int 755 pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused) 756 { 757 struct pfsync_softc *sc = V_pfsyncif; 758 struct mbuf *m = *mp; 759 struct ip *ip = mtod(m, struct ip *); 760 struct pfsync_header *ph; 761 struct pfsync_subheader subh; 762 763 int offset, len, flags = 0; 764 int rv; 765 uint16_t count; 766 767 PF_RULES_RLOCK_TRACKER; 768 769 *mp = NULL; 770 V_pfsyncstats.pfsyncs_ipackets++; 771 772 /* Verify that we have a sync interface configured. */ 773 if (!sc || !sc->sc_sync_if || !V_pf_status.running || 774 (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 775 goto done; 776 777 /* verify that the packet came in on the right interface */ 778 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 779 V_pfsyncstats.pfsyncs_badif++; 780 goto done; 781 } 782 783 if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 784 if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 785 /* verify that the IP TTL is 255. */ 786 if (ip->ip_ttl != PFSYNC_DFLTTL) { 787 V_pfsyncstats.pfsyncs_badttl++; 788 goto done; 789 } 790 791 offset = ip->ip_hl << 2; 792 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 793 V_pfsyncstats.pfsyncs_hdrops++; 794 goto done; 795 } 796 797 if (offset + sizeof(*ph) > m->m_len) { 798 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 799 V_pfsyncstats.pfsyncs_hdrops++; 800 return (IPPROTO_DONE); 801 } 802 ip = mtod(m, struct ip *); 803 } 804 ph = (struct pfsync_header *)((char *)ip + offset); 805 806 /* verify the version */ 807 if (ph->version != PFSYNC_VERSION) { 808 V_pfsyncstats.pfsyncs_badver++; 809 goto done; 810 } 811 812 len = ntohs(ph->len) + offset; 813 if (m->m_pkthdr.len < len) { 814 V_pfsyncstats.pfsyncs_badlen++; 815 goto done; 816 } 817 818 /* 819 * Trusting pf_chksum during packet processing, as well as seeking 820 * in interface name tree, require holding PF_RULES_RLOCK(). 821 */ 822 PF_RULES_RLOCK(); 823 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 824 flags = PFSYNC_SI_CKSUM; 825 826 offset += sizeof(*ph); 827 while (offset <= len - sizeof(subh)) { 828 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 829 offset += sizeof(subh); 830 831 if (subh.action >= PFSYNC_ACT_MAX) { 832 V_pfsyncstats.pfsyncs_badact++; 833 PF_RULES_RUNLOCK(); 834 goto done; 835 } 836 837 count = ntohs(subh.count); 838 V_pfsyncstats.pfsyncs_iacts[subh.action] += count; 839 rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action); 840 if (rv == -1) { 841 PF_RULES_RUNLOCK(); 842 return (IPPROTO_DONE); 843 } 844 845 offset += rv; 846 } 847 PF_RULES_RUNLOCK(); 848 849 done: 850 m_freem(m); 851 return (IPPROTO_DONE); 852 } 853 #endif 854 855 #ifdef INET6 856 static int 857 pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused) 858 { 859 struct pfsync_softc *sc = V_pfsyncif; 860 struct mbuf *m = *mp; 861 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); 862 struct pfsync_header *ph; 863 struct pfsync_subheader subh; 864 865 int offset, len, flags = 0; 866 int rv; 867 uint16_t count; 868 869 PF_RULES_RLOCK_TRACKER; 870 871 *mp = NULL; 872 V_pfsyncstats.pfsyncs_ipackets++; 873 874 /* Verify that we have a sync interface configured. */ 875 if (!sc || !sc->sc_sync_if || !V_pf_status.running || 876 (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 877 goto done; 878 879 /* verify that the packet came in on the right interface */ 880 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 881 V_pfsyncstats.pfsyncs_badif++; 882 goto done; 883 } 884 885 if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 886 if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 887 /* verify that the IP TTL is 255. */ 888 if (ip6->ip6_hlim != PFSYNC_DFLTTL) { 889 V_pfsyncstats.pfsyncs_badttl++; 890 goto done; 891 } 892 893 894 offset = sizeof(*ip6); 895 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 896 V_pfsyncstats.pfsyncs_hdrops++; 897 goto done; 898 } 899 900 if (offset + sizeof(*ph) > m->m_len) { 901 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 902 V_pfsyncstats.pfsyncs_hdrops++; 903 return (IPPROTO_DONE); 904 } 905 ip6 = mtod(m, struct ip6_hdr *); 906 } 907 ph = (struct pfsync_header *)((char *)ip6 + offset); 908 909 /* verify the version */ 910 if (ph->version != PFSYNC_VERSION) { 911 V_pfsyncstats.pfsyncs_badver++; 912 goto done; 913 } 914 915 len = ntohs(ph->len) + offset; 916 if (m->m_pkthdr.len < len) { 917 V_pfsyncstats.pfsyncs_badlen++; 918 goto done; 919 } 920 921 /* 922 * Trusting pf_chksum during packet processing, as well as seeking 923 * in interface name tree, require holding PF_RULES_RLOCK(). 924 */ 925 PF_RULES_RLOCK(); 926 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 927 flags = PFSYNC_SI_CKSUM; 928 929 offset += sizeof(*ph); 930 while (offset <= len - sizeof(subh)) { 931 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 932 offset += sizeof(subh); 933 934 if (subh.action >= PFSYNC_ACT_MAX) { 935 V_pfsyncstats.pfsyncs_badact++; 936 PF_RULES_RUNLOCK(); 937 goto done; 938 } 939 940 count = ntohs(subh.count); 941 V_pfsyncstats.pfsyncs_iacts[subh.action] += count; 942 rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action); 943 if (rv == -1) { 944 PF_RULES_RUNLOCK(); 945 return (IPPROTO_DONE); 946 } 947 948 offset += rv; 949 } 950 PF_RULES_RUNLOCK(); 951 952 done: 953 m_freem(m); 954 return (IPPROTO_DONE); 955 } 956 #endif 957 958 static int 959 pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action) 960 { 961 struct pfsync_clr *clr; 962 struct mbuf *mp; 963 int len = sizeof(*clr) * count; 964 int i, offp; 965 u_int32_t creatorid; 966 967 mp = m_pulldown(m, offset, len, &offp); 968 if (mp == NULL) { 969 V_pfsyncstats.pfsyncs_badlen++; 970 return (-1); 971 } 972 clr = (struct pfsync_clr *)(mp->m_data + offp); 973 974 for (i = 0; i < count; i++) { 975 creatorid = clr[i].creatorid; 976 977 if (clr[i].ifname[0] != '\0' && 978 pfi_kkif_find(clr[i].ifname) == NULL) 979 continue; 980 981 for (int i = 0; i <= pf_hashmask; i++) { 982 struct pf_idhash *ih = &V_pf_idhash[i]; 983 struct pf_kstate *s; 984 relock: 985 PF_HASHROW_LOCK(ih); 986 LIST_FOREACH(s, &ih->states, entry) { 987 if (s->creatorid == creatorid) { 988 s->state_flags |= PFSTATE_NOSYNC; 989 pf_unlink_state(s); 990 goto relock; 991 } 992 } 993 PF_HASHROW_UNLOCK(ih); 994 } 995 } 996 997 return (len); 998 } 999 1000 static int 1001 pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action) 1002 { 1003 struct mbuf *mp; 1004 union pfsync_state_union *sa, *sp; 1005 int i, offp, total_len, msg_version, msg_len; 1006 1007 switch (action) { 1008 case PFSYNC_ACT_INS_1301: 1009 msg_len = sizeof(struct pfsync_state_1301); 1010 total_len = msg_len * count; 1011 msg_version = PFSYNC_MSG_VERSION_1301; 1012 break; 1013 case PFSYNC_ACT_INS_1400: 1014 msg_len = sizeof(struct pfsync_state_1400); 1015 total_len = msg_len * count; 1016 msg_version = PFSYNC_MSG_VERSION_1400; 1017 break; 1018 default: 1019 V_pfsyncstats.pfsyncs_badact++; 1020 return (-1); 1021 } 1022 1023 mp = m_pulldown(m, offset, total_len, &offp); 1024 if (mp == NULL) { 1025 V_pfsyncstats.pfsyncs_badlen++; 1026 return (-1); 1027 } 1028 sa = (union pfsync_state_union *)(mp->m_data + offp); 1029 1030 for (i = 0; i < count; i++) { 1031 sp = (union pfsync_state_union *)((char *)sa + msg_len * i); 1032 1033 /* Check for invalid values. */ 1034 if (sp->pfs_1301.timeout >= PFTM_MAX || 1035 sp->pfs_1301.src.state > PF_TCPS_PROXY_DST || 1036 sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST || 1037 sp->pfs_1301.direction > PF_OUT || 1038 (sp->pfs_1301.af != AF_INET && 1039 sp->pfs_1301.af != AF_INET6)) { 1040 if (V_pf_status.debug >= PF_DEBUG_MISC) 1041 printf("%s: invalid value\n", __func__); 1042 V_pfsyncstats.pfsyncs_badval++; 1043 continue; 1044 } 1045 1046 if (pfsync_state_import(sp, flags, msg_version) == ENOMEM) 1047 /* Drop out, but process the rest of the actions. */ 1048 break; 1049 } 1050 1051 return (total_len); 1052 } 1053 1054 static int 1055 pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action) 1056 { 1057 struct pfsync_ins_ack *ia, *iaa; 1058 struct pf_kstate *st; 1059 1060 struct mbuf *mp; 1061 int len = count * sizeof(*ia); 1062 int offp, i; 1063 1064 mp = m_pulldown(m, offset, len, &offp); 1065 if (mp == NULL) { 1066 V_pfsyncstats.pfsyncs_badlen++; 1067 return (-1); 1068 } 1069 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 1070 1071 for (i = 0; i < count; i++) { 1072 ia = &iaa[i]; 1073 1074 st = pf_find_state_byid(ia->id, ia->creatorid); 1075 if (st == NULL) 1076 continue; 1077 1078 if (st->state_flags & PFSTATE_ACK) { 1079 pfsync_undefer_state(st, 0); 1080 } 1081 PF_STATE_UNLOCK(st); 1082 } 1083 /* 1084 * XXX this is not yet implemented, but we know the size of the 1085 * message so we can skip it. 1086 */ 1087 1088 return (count * sizeof(struct pfsync_ins_ack)); 1089 } 1090 1091 static int 1092 pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src, 1093 struct pfsync_state_peer *dst) 1094 { 1095 int sync = 0; 1096 1097 PF_STATE_LOCK_ASSERT(st); 1098 1099 /* 1100 * The state should never go backwards except 1101 * for syn-proxy states. Neither should the 1102 * sequence window slide backwards. 1103 */ 1104 if ((st->src.state > src->state && 1105 (st->src.state < PF_TCPS_PROXY_SRC || 1106 src->state >= PF_TCPS_PROXY_SRC)) || 1107 1108 (st->src.state == src->state && 1109 SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))) 1110 sync++; 1111 else 1112 pf_state_peer_ntoh(src, &st->src); 1113 1114 if ((st->dst.state > dst->state) || 1115 1116 (st->dst.state >= TCPS_SYN_SENT && 1117 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))) 1118 sync++; 1119 else 1120 pf_state_peer_ntoh(dst, &st->dst); 1121 1122 return (sync); 1123 } 1124 1125 static int 1126 pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action) 1127 { 1128 struct pfsync_softc *sc = V_pfsyncif; 1129 union pfsync_state_union *sa, *sp; 1130 struct pf_kstate *st; 1131 struct mbuf *mp; 1132 int sync, offp, i, total_len, msg_len, msg_version; 1133 1134 switch (action) { 1135 case PFSYNC_ACT_UPD_1301: 1136 msg_len = sizeof(struct pfsync_state_1301); 1137 total_len = msg_len * count; 1138 msg_version = PFSYNC_MSG_VERSION_1301; 1139 break; 1140 case PFSYNC_ACT_UPD_1400: 1141 msg_len = sizeof(struct pfsync_state_1400); 1142 total_len = msg_len * count; 1143 msg_version = PFSYNC_MSG_VERSION_1400; 1144 break; 1145 default: 1146 V_pfsyncstats.pfsyncs_badact++; 1147 return (-1); 1148 } 1149 1150 mp = m_pulldown(m, offset, total_len, &offp); 1151 if (mp == NULL) { 1152 V_pfsyncstats.pfsyncs_badlen++; 1153 return (-1); 1154 } 1155 sa = (union pfsync_state_union *)(mp->m_data + offp); 1156 1157 for (i = 0; i < count; i++) { 1158 sp = (union pfsync_state_union *)((char *)sa + msg_len * i); 1159 1160 /* check for invalid values */ 1161 if (sp->pfs_1301.timeout >= PFTM_MAX || 1162 sp->pfs_1301.src.state > PF_TCPS_PROXY_DST || 1163 sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) { 1164 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1165 printf("pfsync_input: PFSYNC_ACT_UPD: " 1166 "invalid value\n"); 1167 } 1168 V_pfsyncstats.pfsyncs_badval++; 1169 continue; 1170 } 1171 1172 st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid); 1173 if (st == NULL) { 1174 /* insert the update */ 1175 if (pfsync_state_import(sp, flags, msg_version)) 1176 V_pfsyncstats.pfsyncs_badstate++; 1177 continue; 1178 } 1179 1180 if (st->state_flags & PFSTATE_ACK) { 1181 pfsync_undefer_state(st, 1); 1182 } 1183 1184 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 1185 sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst); 1186 else { 1187 sync = 0; 1188 1189 /* 1190 * Non-TCP protocol state machine always go 1191 * forwards 1192 */ 1193 if (st->src.state > sp->pfs_1301.src.state) 1194 sync++; 1195 else 1196 pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src); 1197 if (st->dst.state > sp->pfs_1301.dst.state) 1198 sync++; 1199 else 1200 pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); 1201 } 1202 if (sync < 2) { 1203 pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst); 1204 pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst); 1205 st->expire = pf_get_uptime(); 1206 st->timeout = sp->pfs_1301.timeout; 1207 } 1208 st->pfsync_time = time_uptime; 1209 1210 if (sync) { 1211 V_pfsyncstats.pfsyncs_stale++; 1212 1213 pfsync_update_state(st); 1214 PF_STATE_UNLOCK(st); 1215 pfsync_push_all(sc); 1216 continue; 1217 } 1218 PF_STATE_UNLOCK(st); 1219 } 1220 1221 return (total_len); 1222 } 1223 1224 static int 1225 pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action) 1226 { 1227 struct pfsync_softc *sc = V_pfsyncif; 1228 struct pfsync_upd_c *ua, *up; 1229 struct pf_kstate *st; 1230 int len = count * sizeof(*up); 1231 int sync; 1232 struct mbuf *mp; 1233 int offp, i; 1234 1235 mp = m_pulldown(m, offset, len, &offp); 1236 if (mp == NULL) { 1237 V_pfsyncstats.pfsyncs_badlen++; 1238 return (-1); 1239 } 1240 ua = (struct pfsync_upd_c *)(mp->m_data + offp); 1241 1242 for (i = 0; i < count; i++) { 1243 up = &ua[i]; 1244 1245 /* check for invalid values */ 1246 if (up->timeout >= PFTM_MAX || 1247 up->src.state > PF_TCPS_PROXY_DST || 1248 up->dst.state > PF_TCPS_PROXY_DST) { 1249 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1250 printf("pfsync_input: " 1251 "PFSYNC_ACT_UPD_C: " 1252 "invalid value\n"); 1253 } 1254 V_pfsyncstats.pfsyncs_badval++; 1255 continue; 1256 } 1257 1258 st = pf_find_state_byid(up->id, up->creatorid); 1259 if (st == NULL) { 1260 /* We don't have this state. Ask for it. */ 1261 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); 1262 pfsync_request_update(up->creatorid, up->id); 1263 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); 1264 continue; 1265 } 1266 1267 if (st->state_flags & PFSTATE_ACK) { 1268 pfsync_undefer_state(st, 1); 1269 } 1270 1271 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 1272 sync = pfsync_upd_tcp(st, &up->src, &up->dst); 1273 else { 1274 sync = 0; 1275 1276 /* 1277 * Non-TCP protocol state machine always go 1278 * forwards 1279 */ 1280 if (st->src.state > up->src.state) 1281 sync++; 1282 else 1283 pf_state_peer_ntoh(&up->src, &st->src); 1284 if (st->dst.state > up->dst.state) 1285 sync++; 1286 else 1287 pf_state_peer_ntoh(&up->dst, &st->dst); 1288 } 1289 if (sync < 2) { 1290 pfsync_alloc_scrub_memory(&up->dst, &st->dst); 1291 pf_state_peer_ntoh(&up->dst, &st->dst); 1292 st->expire = pf_get_uptime(); 1293 st->timeout = up->timeout; 1294 } 1295 st->pfsync_time = time_uptime; 1296 1297 if (sync) { 1298 V_pfsyncstats.pfsyncs_stale++; 1299 1300 pfsync_update_state(st); 1301 PF_STATE_UNLOCK(st); 1302 pfsync_push_all(sc); 1303 continue; 1304 } 1305 PF_STATE_UNLOCK(st); 1306 } 1307 1308 return (len); 1309 } 1310 1311 static int 1312 pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action) 1313 { 1314 struct pfsync_upd_req *ur, *ura; 1315 struct mbuf *mp; 1316 int len = count * sizeof(*ur); 1317 int i, offp; 1318 1319 struct pf_kstate *st; 1320 1321 mp = m_pulldown(m, offset, len, &offp); 1322 if (mp == NULL) { 1323 V_pfsyncstats.pfsyncs_badlen++; 1324 return (-1); 1325 } 1326 ura = (struct pfsync_upd_req *)(mp->m_data + offp); 1327 1328 for (i = 0; i < count; i++) { 1329 ur = &ura[i]; 1330 1331 if (ur->id == 0 && ur->creatorid == 0) 1332 pfsync_bulk_start(); 1333 else { 1334 st = pf_find_state_byid(ur->id, ur->creatorid); 1335 if (st == NULL) { 1336 V_pfsyncstats.pfsyncs_badstate++; 1337 continue; 1338 } 1339 if (st->state_flags & PFSTATE_NOSYNC) { 1340 PF_STATE_UNLOCK(st); 1341 continue; 1342 } 1343 1344 pfsync_update_state_req(st); 1345 PF_STATE_UNLOCK(st); 1346 } 1347 } 1348 1349 return (len); 1350 } 1351 1352 static int 1353 pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action) 1354 { 1355 struct mbuf *mp; 1356 struct pfsync_del_c *sa, *sp; 1357 struct pf_kstate *st; 1358 int len = count * sizeof(*sp); 1359 int offp, i; 1360 1361 mp = m_pulldown(m, offset, len, &offp); 1362 if (mp == NULL) { 1363 V_pfsyncstats.pfsyncs_badlen++; 1364 return (-1); 1365 } 1366 sa = (struct pfsync_del_c *)(mp->m_data + offp); 1367 1368 for (i = 0; i < count; i++) { 1369 sp = &sa[i]; 1370 1371 st = pf_find_state_byid(sp->id, sp->creatorid); 1372 if (st == NULL) { 1373 V_pfsyncstats.pfsyncs_badstate++; 1374 continue; 1375 } 1376 1377 st->state_flags |= PFSTATE_NOSYNC; 1378 pf_unlink_state(st); 1379 } 1380 1381 return (len); 1382 } 1383 1384 static int 1385 pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action) 1386 { 1387 struct pfsync_softc *sc = V_pfsyncif; 1388 struct pfsync_bus *bus; 1389 struct mbuf *mp; 1390 int len = count * sizeof(*bus); 1391 int offp; 1392 1393 PFSYNC_BLOCK(sc); 1394 1395 /* If we're not waiting for a bulk update, who cares. */ 1396 if (sc->sc_ureq_sent == 0) { 1397 PFSYNC_BUNLOCK(sc); 1398 return (len); 1399 } 1400 1401 mp = m_pulldown(m, offset, len, &offp); 1402 if (mp == NULL) { 1403 PFSYNC_BUNLOCK(sc); 1404 V_pfsyncstats.pfsyncs_badlen++; 1405 return (-1); 1406 } 1407 bus = (struct pfsync_bus *)(mp->m_data + offp); 1408 1409 switch (bus->status) { 1410 case PFSYNC_BUS_START: 1411 callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + 1412 V_pf_limits[PF_LIMIT_STATES].limit / 1413 ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) / 1414 sizeof(union pfsync_state_union)), 1415 pfsync_bulk_fail, sc); 1416 if (V_pf_status.debug >= PF_DEBUG_MISC) 1417 printf("pfsync: received bulk update start\n"); 1418 break; 1419 1420 case PFSYNC_BUS_END: 1421 if (time_uptime - ntohl(bus->endtime) >= 1422 sc->sc_ureq_sent) { 1423 /* that's it, we're happy */ 1424 sc->sc_ureq_sent = 0; 1425 sc->sc_bulk_tries = 0; 1426 callout_stop(&sc->sc_bulkfail_tmo); 1427 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 1428 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 1429 "pfsync bulk done"); 1430 sc->sc_flags |= PFSYNCF_OK; 1431 if (V_pf_status.debug >= PF_DEBUG_MISC) 1432 printf("pfsync: received valid " 1433 "bulk update end\n"); 1434 } else { 1435 if (V_pf_status.debug >= PF_DEBUG_MISC) 1436 printf("pfsync: received invalid " 1437 "bulk update end: bad timestamp\n"); 1438 } 1439 break; 1440 } 1441 PFSYNC_BUNLOCK(sc); 1442 1443 return (len); 1444 } 1445 1446 static int 1447 pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action) 1448 { 1449 int len = count * sizeof(struct pfsync_tdb); 1450 1451 #if defined(IPSEC) 1452 struct pfsync_tdb *tp; 1453 struct mbuf *mp; 1454 int offp; 1455 int i; 1456 int s; 1457 1458 mp = m_pulldown(m, offset, len, &offp); 1459 if (mp == NULL) { 1460 V_pfsyncstats.pfsyncs_badlen++; 1461 return (-1); 1462 } 1463 tp = (struct pfsync_tdb *)(mp->m_data + offp); 1464 1465 for (i = 0; i < count; i++) 1466 pfsync_update_net_tdb(&tp[i]); 1467 #endif 1468 1469 return (len); 1470 } 1471 1472 #if defined(IPSEC) 1473 /* Update an in-kernel tdb. Silently fail if no tdb is found. */ 1474 static void 1475 pfsync_update_net_tdb(struct pfsync_tdb *pt) 1476 { 1477 struct tdb *tdb; 1478 int s; 1479 1480 /* check for invalid values */ 1481 if (ntohl(pt->spi) <= SPI_RESERVED_MAX || 1482 (pt->dst.sa.sa_family != AF_INET && 1483 pt->dst.sa.sa_family != AF_INET6)) 1484 goto bad; 1485 1486 tdb = gettdb(pt->spi, &pt->dst, pt->sproto); 1487 if (tdb) { 1488 pt->rpl = ntohl(pt->rpl); 1489 pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes); 1490 1491 /* Neither replay nor byte counter should ever decrease. */ 1492 if (pt->rpl < tdb->tdb_rpl || 1493 pt->cur_bytes < tdb->tdb_cur_bytes) { 1494 goto bad; 1495 } 1496 1497 tdb->tdb_rpl = pt->rpl; 1498 tdb->tdb_cur_bytes = pt->cur_bytes; 1499 } 1500 return; 1501 1502 bad: 1503 if (V_pf_status.debug >= PF_DEBUG_MISC) 1504 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: " 1505 "invalid value\n"); 1506 V_pfsyncstats.pfsyncs_badstate++; 1507 return; 1508 } 1509 #endif 1510 1511 static int 1512 pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action) 1513 { 1514 /* check if we are at the right place in the packet */ 1515 if (offset != m->m_pkthdr.len) 1516 V_pfsyncstats.pfsyncs_badlen++; 1517 1518 /* we're done. free and let the caller return */ 1519 m_freem(m); 1520 return (-1); 1521 } 1522 1523 static int 1524 pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action) 1525 { 1526 V_pfsyncstats.pfsyncs_badact++; 1527 1528 m_freem(m); 1529 return (-1); 1530 } 1531 1532 static int 1533 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 1534 struct route *rt) 1535 { 1536 m_freem(m); 1537 return (0); 1538 } 1539 1540 /* ARGSUSED */ 1541 static int 1542 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1543 { 1544 struct pfsync_softc *sc = ifp->if_softc; 1545 struct ifreq *ifr = (struct ifreq *)data; 1546 struct pfsyncreq pfsyncr; 1547 size_t nvbuflen; 1548 int error; 1549 int c; 1550 1551 switch (cmd) { 1552 case SIOCSIFFLAGS: 1553 PFSYNC_LOCK(sc); 1554 if (ifp->if_flags & IFF_UP) { 1555 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1556 PFSYNC_UNLOCK(sc); 1557 pfsync_pointers_init(); 1558 } else { 1559 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1560 PFSYNC_UNLOCK(sc); 1561 pfsync_pointers_uninit(); 1562 } 1563 break; 1564 case SIOCSIFMTU: 1565 if (!sc->sc_sync_if || 1566 ifr->ifr_mtu <= PFSYNC_MINPKT || 1567 ifr->ifr_mtu > sc->sc_sync_if->if_mtu) 1568 return (EINVAL); 1569 if (ifr->ifr_mtu < ifp->if_mtu) { 1570 for (c = 0; c < pfsync_buckets; c++) { 1571 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); 1572 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT) 1573 pfsync_sendout(1, c); 1574 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); 1575 } 1576 } 1577 ifp->if_mtu = ifr->ifr_mtu; 1578 break; 1579 case SIOCGETPFSYNC: 1580 bzero(&pfsyncr, sizeof(pfsyncr)); 1581 PFSYNC_LOCK(sc); 1582 if (sc->sc_sync_if) { 1583 strlcpy(pfsyncr.pfsyncr_syncdev, 1584 sc->sc_sync_if->if_xname, IFNAMSIZ); 1585 } 1586 pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr; 1587 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; 1588 pfsyncr.pfsyncr_defer = sc->sc_flags; 1589 PFSYNC_UNLOCK(sc); 1590 return (copyout(&pfsyncr, ifr_data_get_ptr(ifr), 1591 sizeof(pfsyncr))); 1592 1593 case SIOCGETPFSYNCNV: 1594 { 1595 nvlist_t *nvl_syncpeer; 1596 nvlist_t *nvl = nvlist_create(0); 1597 1598 if (nvl == NULL) 1599 return (ENOMEM); 1600 1601 if (sc->sc_sync_if) 1602 nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname); 1603 nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates); 1604 nvlist_add_number(nvl, "flags", sc->sc_flags); 1605 nvlist_add_number(nvl, "version", sc->sc_version); 1606 if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL) 1607 nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer); 1608 1609 void *packed = NULL; 1610 packed = nvlist_pack(nvl, &nvbuflen); 1611 if (packed == NULL) { 1612 free(packed, M_NVLIST); 1613 nvlist_destroy(nvl); 1614 return (ENOMEM); 1615 } 1616 1617 if (nvbuflen > ifr->ifr_cap_nv.buf_length) { 1618 ifr->ifr_cap_nv.length = nvbuflen; 1619 ifr->ifr_cap_nv.buffer = NULL; 1620 free(packed, M_NVLIST); 1621 nvlist_destroy(nvl); 1622 return (EFBIG); 1623 } 1624 1625 ifr->ifr_cap_nv.length = nvbuflen; 1626 error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen); 1627 1628 nvlist_destroy(nvl); 1629 nvlist_destroy(nvl_syncpeer); 1630 free(packed, M_NVLIST); 1631 break; 1632 } 1633 1634 case SIOCSETPFSYNC: 1635 { 1636 struct pfsync_kstatus status; 1637 1638 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) 1639 return (error); 1640 if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr, 1641 sizeof(pfsyncr)))) 1642 return (error); 1643 1644 memset((char *)&status, 0, sizeof(struct pfsync_kstatus)); 1645 pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status); 1646 1647 error = pfsync_kstatus_to_softc(&status, sc); 1648 return (error); 1649 } 1650 case SIOCSETPFSYNCNV: 1651 { 1652 struct pfsync_kstatus status; 1653 void *data; 1654 nvlist_t *nvl; 1655 1656 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) 1657 return (error); 1658 if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE) 1659 return (EINVAL); 1660 1661 data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK); 1662 1663 if ((error = copyin(ifr->ifr_cap_nv.buffer, data, 1664 ifr->ifr_cap_nv.length)) != 0) { 1665 free(data, M_TEMP); 1666 return (error); 1667 } 1668 1669 if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) { 1670 free(data, M_TEMP); 1671 return (EINVAL); 1672 } 1673 1674 memset((char *)&status, 0, sizeof(struct pfsync_kstatus)); 1675 pfsync_nvstatus_to_kstatus(nvl, &status); 1676 1677 nvlist_destroy(nvl); 1678 free(data, M_TEMP); 1679 1680 error = pfsync_kstatus_to_softc(&status, sc); 1681 return (error); 1682 } 1683 default: 1684 return (ENOTTY); 1685 } 1686 1687 return (0); 1688 } 1689 1690 static void 1691 pfsync_out_state_1301(struct pf_kstate *st, void *buf) 1692 { 1693 union pfsync_state_union *sp = buf; 1694 1695 pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1301); 1696 } 1697 1698 static void 1699 pfsync_out_state_1400(struct pf_kstate *st, void *buf) 1700 { 1701 union pfsync_state_union *sp = buf; 1702 1703 pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1400); 1704 } 1705 1706 static void 1707 pfsync_out_iack(struct pf_kstate *st, void *buf) 1708 { 1709 struct pfsync_ins_ack *iack = buf; 1710 1711 iack->id = st->id; 1712 iack->creatorid = st->creatorid; 1713 } 1714 1715 static void 1716 pfsync_out_upd_c(struct pf_kstate *st, void *buf) 1717 { 1718 struct pfsync_upd_c *up = buf; 1719 1720 bzero(up, sizeof(*up)); 1721 up->id = st->id; 1722 pf_state_peer_hton(&st->src, &up->src); 1723 pf_state_peer_hton(&st->dst, &up->dst); 1724 up->creatorid = st->creatorid; 1725 up->timeout = st->timeout; 1726 } 1727 1728 static void 1729 pfsync_out_del_c(struct pf_kstate *st, void *buf) 1730 { 1731 struct pfsync_del_c *dp = buf; 1732 1733 dp->id = st->id; 1734 dp->creatorid = st->creatorid; 1735 st->state_flags |= PFSTATE_NOSYNC; 1736 } 1737 1738 static void 1739 pfsync_drop(struct pfsync_softc *sc) 1740 { 1741 struct pf_kstate *st, *next; 1742 struct pfsync_upd_req_item *ur; 1743 struct pfsync_bucket *b; 1744 int c; 1745 enum pfsync_q_id q; 1746 1747 for (c = 0; c < pfsync_buckets; c++) { 1748 b = &sc->sc_buckets[c]; 1749 for (q = 0; q < PFSYNC_Q_COUNT; q++) { 1750 if (TAILQ_EMPTY(&b->b_qs[q])) 1751 continue; 1752 1753 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) { 1754 KASSERT(st->sync_state == pfsync_qid_sstate[q], 1755 ("%s: st->sync_state == q", 1756 __func__)); 1757 st->sync_state = PFSYNC_S_NONE; 1758 pf_release_state(st); 1759 } 1760 TAILQ_INIT(&b->b_qs[q]); 1761 } 1762 1763 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { 1764 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); 1765 free(ur, M_PFSYNC); 1766 } 1767 1768 b->b_len = PFSYNC_MINPKT; 1769 b->b_plus = NULL; 1770 } 1771 } 1772 1773 static void 1774 pfsync_sendout(int schedswi, int c) 1775 { 1776 struct pfsync_softc *sc = V_pfsyncif; 1777 struct ifnet *ifp = sc->sc_ifp; 1778 struct mbuf *m; 1779 struct pfsync_header *ph; 1780 struct pfsync_subheader *subh; 1781 struct pf_kstate *st, *st_next; 1782 struct pfsync_upd_req_item *ur; 1783 struct pfsync_bucket *b = &sc->sc_buckets[c]; 1784 size_t len; 1785 int aflen, offset, count = 0; 1786 enum pfsync_q_id q; 1787 1788 KASSERT(sc != NULL, ("%s: null sc", __func__)); 1789 KASSERT(b->b_len > PFSYNC_MINPKT, 1790 ("%s: sc_len %zu", __func__, b->b_len)); 1791 PFSYNC_BUCKET_LOCK_ASSERT(b); 1792 1793 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) { 1794 pfsync_drop(sc); 1795 return; 1796 } 1797 1798 m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR); 1799 if (m == NULL) { 1800 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 1801 V_pfsyncstats.pfsyncs_onomem++; 1802 return; 1803 } 1804 m->m_data += max_linkhdr; 1805 bzero(m->m_data, b->b_len); 1806 1807 len = b->b_len; 1808 1809 /* build the ip header */ 1810 switch (sc->sc_sync_peer.ss_family) { 1811 #ifdef INET 1812 case AF_INET: 1813 { 1814 struct ip *ip; 1815 1816 ip = mtod(m, struct ip *); 1817 bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip)); 1818 aflen = offset = sizeof(*ip); 1819 1820 len -= sizeof(union inet_template) - sizeof(struct ip); 1821 ip->ip_len = htons(len); 1822 ip_fillid(ip); 1823 break; 1824 } 1825 #endif 1826 #ifdef INET6 1827 case AF_INET6: 1828 { 1829 struct ip6_hdr *ip6; 1830 1831 ip6 = mtod(m, struct ip6_hdr *); 1832 bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6)); 1833 aflen = offset = sizeof(*ip6); 1834 1835 len -= sizeof(union inet_template) - sizeof(struct ip6_hdr); 1836 ip6->ip6_plen = htons(len); 1837 break; 1838 } 1839 #endif 1840 default: 1841 m_freem(m); 1842 return; 1843 } 1844 m->m_len = m->m_pkthdr.len = len; 1845 1846 /* build the pfsync header */ 1847 ph = (struct pfsync_header *)(m->m_data + offset); 1848 offset += sizeof(*ph); 1849 1850 ph->version = PFSYNC_VERSION; 1851 ph->len = htons(len - aflen); 1852 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); 1853 1854 /* walk the queues */ 1855 for (q = 0; q < PFSYNC_Q_COUNT; q++) { 1856 if (TAILQ_EMPTY(&b->b_qs[q])) 1857 continue; 1858 1859 subh = (struct pfsync_subheader *)(m->m_data + offset); 1860 offset += sizeof(*subh); 1861 1862 count = 0; 1863 TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) { 1864 KASSERT(st->sync_state == pfsync_qid_sstate[q], 1865 ("%s: st->sync_state == q", 1866 __func__)); 1867 /* 1868 * XXXGL: some of write methods do unlocked reads 1869 * of state data :( 1870 */ 1871 pfsync_qs[q].write(st, m->m_data + offset); 1872 offset += pfsync_qs[q].len; 1873 st->sync_state = PFSYNC_S_NONE; 1874 pf_release_state(st); 1875 count++; 1876 } 1877 TAILQ_INIT(&b->b_qs[q]); 1878 1879 subh->action = pfsync_qs[q].action; 1880 subh->count = htons(count); 1881 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count; 1882 } 1883 1884 if (!TAILQ_EMPTY(&b->b_upd_req_list)) { 1885 subh = (struct pfsync_subheader *)(m->m_data + offset); 1886 offset += sizeof(*subh); 1887 1888 count = 0; 1889 while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) { 1890 TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry); 1891 1892 bcopy(&ur->ur_msg, m->m_data + offset, 1893 sizeof(ur->ur_msg)); 1894 offset += sizeof(ur->ur_msg); 1895 free(ur, M_PFSYNC); 1896 count++; 1897 } 1898 1899 subh->action = PFSYNC_ACT_UPD_REQ; 1900 subh->count = htons(count); 1901 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count; 1902 } 1903 1904 /* has someone built a custom region for us to add? */ 1905 if (b->b_plus != NULL) { 1906 bcopy(b->b_plus, m->m_data + offset, b->b_pluslen); 1907 offset += b->b_pluslen; 1908 1909 b->b_plus = NULL; 1910 } 1911 1912 subh = (struct pfsync_subheader *)(m->m_data + offset); 1913 offset += sizeof(*subh); 1914 1915 subh->action = PFSYNC_ACT_EOF; 1916 subh->count = htons(1); 1917 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++; 1918 1919 /* we're done, let's put it on the wire */ 1920 if (ifp->if_bpf) { 1921 m->m_data += aflen; 1922 m->m_len = m->m_pkthdr.len = len - aflen; 1923 BPF_MTAP(ifp, m); 1924 m->m_data -= aflen; 1925 m->m_len = m->m_pkthdr.len = len; 1926 } 1927 1928 if (sc->sc_sync_if == NULL) { 1929 b->b_len = PFSYNC_MINPKT; 1930 m_freem(m); 1931 return; 1932 } 1933 1934 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1); 1935 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); 1936 b->b_len = PFSYNC_MINPKT; 1937 1938 if (!_IF_QFULL(&b->b_snd)) 1939 _IF_ENQUEUE(&b->b_snd, m); 1940 else { 1941 m_freem(m); 1942 if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); 1943 } 1944 if (schedswi) 1945 swi_sched(V_pfsync_swi_cookie, 0); 1946 } 1947 1948 static void 1949 pfsync_insert_state(struct pf_kstate *st) 1950 { 1951 struct pfsync_softc *sc = V_pfsyncif; 1952 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 1953 1954 if (st->state_flags & PFSTATE_NOSYNC) 1955 return; 1956 1957 if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || 1958 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) { 1959 st->state_flags |= PFSTATE_NOSYNC; 1960 return; 1961 } 1962 1963 KASSERT(st->sync_state == PFSYNC_S_NONE, 1964 ("%s: st->sync_state %u", __func__, st->sync_state)); 1965 1966 PFSYNC_BUCKET_LOCK(b); 1967 if (b->b_len == PFSYNC_MINPKT) 1968 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 1969 1970 pfsync_q_ins(st, PFSYNC_S_INS, true); 1971 PFSYNC_BUCKET_UNLOCK(b); 1972 1973 st->sync_updates = 0; 1974 } 1975 1976 static int 1977 pfsync_defer(struct pf_kstate *st, struct mbuf *m) 1978 { 1979 struct pfsync_softc *sc = V_pfsyncif; 1980 struct pfsync_deferral *pd; 1981 struct pfsync_bucket *b; 1982 1983 if (m->m_flags & (M_BCAST|M_MCAST)) 1984 return (0); 1985 1986 if (sc == NULL) 1987 return (0); 1988 1989 b = pfsync_get_bucket(sc, st); 1990 1991 PFSYNC_LOCK(sc); 1992 1993 if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) || 1994 !(sc->sc_flags & PFSYNCF_DEFER)) { 1995 PFSYNC_UNLOCK(sc); 1996 return (0); 1997 } 1998 1999 PFSYNC_BUCKET_LOCK(b); 2000 PFSYNC_UNLOCK(sc); 2001 2002 if (b->b_deferred >= 128) 2003 pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0); 2004 2005 pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT); 2006 if (pd == NULL) { 2007 PFSYNC_BUCKET_UNLOCK(b); 2008 return (0); 2009 } 2010 b->b_deferred++; 2011 2012 m->m_flags |= M_SKIP_FIREWALL; 2013 st->state_flags |= PFSTATE_ACK; 2014 2015 pd->pd_sc = sc; 2016 pd->pd_st = st; 2017 pf_ref_state(st); 2018 pd->pd_m = m; 2019 2020 TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry); 2021 callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED); 2022 callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000, 2023 pfsync_defer_tmo, pd); 2024 2025 pfsync_push(b); 2026 PFSYNC_BUCKET_UNLOCK(b); 2027 2028 return (1); 2029 } 2030 2031 static void 2032 pfsync_undefer(struct pfsync_deferral *pd, int drop) 2033 { 2034 struct pfsync_softc *sc = pd->pd_sc; 2035 struct mbuf *m = pd->pd_m; 2036 struct pf_kstate *st = pd->pd_st; 2037 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2038 2039 PFSYNC_BUCKET_LOCK_ASSERT(b); 2040 2041 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); 2042 b->b_deferred--; 2043 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 2044 free(pd, M_PFSYNC); 2045 pf_release_state(st); 2046 2047 if (drop) 2048 m_freem(m); 2049 else { 2050 _IF_ENQUEUE(&b->b_snd, m); 2051 pfsync_push(b); 2052 } 2053 } 2054 2055 static void 2056 pfsync_defer_tmo(void *arg) 2057 { 2058 struct epoch_tracker et; 2059 struct pfsync_deferral *pd = arg; 2060 struct pfsync_softc *sc = pd->pd_sc; 2061 struct mbuf *m = pd->pd_m; 2062 struct pf_kstate *st = pd->pd_st; 2063 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2064 2065 PFSYNC_BUCKET_LOCK_ASSERT(b); 2066 2067 TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry); 2068 b->b_deferred--; 2069 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 2070 PFSYNC_BUCKET_UNLOCK(b); 2071 free(pd, M_PFSYNC); 2072 2073 if (sc->sc_sync_if == NULL) { 2074 pf_release_state(st); 2075 m_freem(m); 2076 return; 2077 } 2078 2079 NET_EPOCH_ENTER(et); 2080 CURVNET_SET(sc->sc_sync_if->if_vnet); 2081 2082 pfsync_tx(sc, m); 2083 2084 pf_release_state(st); 2085 2086 CURVNET_RESTORE(); 2087 NET_EPOCH_EXIT(et); 2088 } 2089 2090 static void 2091 pfsync_undefer_state_locked(struct pf_kstate *st, int drop) 2092 { 2093 struct pfsync_softc *sc = V_pfsyncif; 2094 struct pfsync_deferral *pd; 2095 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2096 2097 PFSYNC_BUCKET_LOCK_ASSERT(b); 2098 2099 TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) { 2100 if (pd->pd_st == st) { 2101 if (callout_stop(&pd->pd_tmo) > 0) 2102 pfsync_undefer(pd, drop); 2103 2104 return; 2105 } 2106 } 2107 2108 panic("%s: unable to find deferred state", __func__); 2109 } 2110 2111 static void 2112 pfsync_undefer_state(struct pf_kstate *st, int drop) 2113 { 2114 struct pfsync_softc *sc = V_pfsyncif; 2115 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2116 2117 PFSYNC_BUCKET_LOCK(b); 2118 pfsync_undefer_state_locked(st, drop); 2119 PFSYNC_BUCKET_UNLOCK(b); 2120 } 2121 2122 static struct pfsync_bucket* 2123 pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st) 2124 { 2125 int c = PF_IDHASH(st) % pfsync_buckets; 2126 return &sc->sc_buckets[c]; 2127 } 2128 2129 static void 2130 pfsync_update_state(struct pf_kstate *st) 2131 { 2132 struct pfsync_softc *sc = V_pfsyncif; 2133 bool sync = false, ref = true; 2134 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2135 2136 PF_STATE_LOCK_ASSERT(st); 2137 PFSYNC_BUCKET_LOCK(b); 2138 2139 if (st->state_flags & PFSTATE_ACK) 2140 pfsync_undefer_state_locked(st, 0); 2141 if (st->state_flags & PFSTATE_NOSYNC) { 2142 if (st->sync_state != PFSYNC_S_NONE) 2143 pfsync_q_del(st, true, b); 2144 PFSYNC_BUCKET_UNLOCK(b); 2145 return; 2146 } 2147 2148 if (b->b_len == PFSYNC_MINPKT) 2149 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 2150 2151 switch (st->sync_state) { 2152 case PFSYNC_S_UPD_C: 2153 case PFSYNC_S_UPD: 2154 case PFSYNC_S_INS: 2155 /* we're already handling it */ 2156 2157 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) { 2158 st->sync_updates++; 2159 if (st->sync_updates >= sc->sc_maxupdates) 2160 sync = true; 2161 } 2162 break; 2163 2164 case PFSYNC_S_IACK: 2165 pfsync_q_del(st, false, b); 2166 ref = false; 2167 /* FALLTHROUGH */ 2168 2169 case PFSYNC_S_NONE: 2170 pfsync_q_ins(st, PFSYNC_S_UPD_C, ref); 2171 st->sync_updates = 0; 2172 break; 2173 2174 default: 2175 panic("%s: unexpected sync state %d", __func__, st->sync_state); 2176 } 2177 2178 if (sync || (time_uptime - st->pfsync_time) < 2) 2179 pfsync_push(b); 2180 2181 PFSYNC_BUCKET_UNLOCK(b); 2182 } 2183 2184 static void 2185 pfsync_request_update(u_int32_t creatorid, u_int64_t id) 2186 { 2187 struct pfsync_softc *sc = V_pfsyncif; 2188 struct pfsync_bucket *b = &sc->sc_buckets[0]; 2189 struct pfsync_upd_req_item *item; 2190 size_t nlen = sizeof(struct pfsync_upd_req); 2191 2192 PFSYNC_BUCKET_LOCK_ASSERT(b); 2193 2194 /* 2195 * This code does a bit to prevent multiple update requests for the 2196 * same state being generated. It searches current subheader queue, 2197 * but it doesn't lookup into queue of already packed datagrams. 2198 */ 2199 TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry) 2200 if (item->ur_msg.id == id && 2201 item->ur_msg.creatorid == creatorid) 2202 return; 2203 2204 item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT); 2205 if (item == NULL) 2206 return; /* XXX stats */ 2207 2208 item->ur_msg.id = id; 2209 item->ur_msg.creatorid = creatorid; 2210 2211 if (TAILQ_EMPTY(&b->b_upd_req_list)) 2212 nlen += sizeof(struct pfsync_subheader); 2213 2214 if (b->b_len + nlen > sc->sc_ifp->if_mtu) { 2215 pfsync_sendout(0, 0); 2216 2217 nlen = sizeof(struct pfsync_subheader) + 2218 sizeof(struct pfsync_upd_req); 2219 } 2220 2221 TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry); 2222 b->b_len += nlen; 2223 2224 pfsync_push(b); 2225 } 2226 2227 static bool 2228 pfsync_update_state_req(struct pf_kstate *st) 2229 { 2230 struct pfsync_softc *sc = V_pfsyncif; 2231 bool ref = true, full = false; 2232 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2233 2234 PF_STATE_LOCK_ASSERT(st); 2235 PFSYNC_BUCKET_LOCK(b); 2236 2237 if (st->state_flags & PFSTATE_NOSYNC) { 2238 if (st->sync_state != PFSYNC_S_NONE) 2239 pfsync_q_del(st, true, b); 2240 PFSYNC_BUCKET_UNLOCK(b); 2241 return (full); 2242 } 2243 2244 switch (st->sync_state) { 2245 case PFSYNC_S_UPD_C: 2246 case PFSYNC_S_IACK: 2247 pfsync_q_del(st, false, b); 2248 ref = false; 2249 /* FALLTHROUGH */ 2250 2251 case PFSYNC_S_NONE: 2252 pfsync_q_ins(st, PFSYNC_S_UPD, ref); 2253 pfsync_push(b); 2254 break; 2255 2256 case PFSYNC_S_INS: 2257 case PFSYNC_S_UPD: 2258 case PFSYNC_S_DEL_C: 2259 /* we're already handling it */ 2260 break; 2261 2262 default: 2263 panic("%s: unexpected sync state %d", __func__, st->sync_state); 2264 } 2265 2266 if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union)) 2267 full = true; 2268 2269 PFSYNC_BUCKET_UNLOCK(b); 2270 2271 return (full); 2272 } 2273 2274 static void 2275 pfsync_delete_state(struct pf_kstate *st) 2276 { 2277 struct pfsync_softc *sc = V_pfsyncif; 2278 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2279 bool ref = true; 2280 2281 PFSYNC_BUCKET_LOCK(b); 2282 if (st->state_flags & PFSTATE_ACK) 2283 pfsync_undefer_state_locked(st, 1); 2284 if (st->state_flags & PFSTATE_NOSYNC) { 2285 if (st->sync_state != PFSYNC_S_NONE) 2286 pfsync_q_del(st, true, b); 2287 PFSYNC_BUCKET_UNLOCK(b); 2288 return; 2289 } 2290 2291 if (b->b_len == PFSYNC_MINPKT) 2292 callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b); 2293 2294 switch (st->sync_state) { 2295 case PFSYNC_S_INS: 2296 /* We never got to tell the world so just forget about it. */ 2297 pfsync_q_del(st, true, b); 2298 break; 2299 2300 case PFSYNC_S_UPD_C: 2301 case PFSYNC_S_UPD: 2302 case PFSYNC_S_IACK: 2303 pfsync_q_del(st, false, b); 2304 ref = false; 2305 /* FALLTHROUGH */ 2306 2307 case PFSYNC_S_NONE: 2308 pfsync_q_ins(st, PFSYNC_S_DEL_C, ref); 2309 break; 2310 2311 default: 2312 panic("%s: unexpected sync state %d", __func__, st->sync_state); 2313 } 2314 2315 PFSYNC_BUCKET_UNLOCK(b); 2316 } 2317 2318 static void 2319 pfsync_clear_states(u_int32_t creatorid, const char *ifname) 2320 { 2321 struct { 2322 struct pfsync_subheader subh; 2323 struct pfsync_clr clr; 2324 } __packed r; 2325 2326 bzero(&r, sizeof(r)); 2327 2328 r.subh.action = PFSYNC_ACT_CLR; 2329 r.subh.count = htons(1); 2330 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++; 2331 2332 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); 2333 r.clr.creatorid = creatorid; 2334 2335 pfsync_send_plus(&r, sizeof(r)); 2336 } 2337 2338 static enum pfsync_q_id 2339 pfsync_sstate_to_qid(u_int8_t sync_state) 2340 { 2341 struct pfsync_softc *sc = V_pfsyncif; 2342 2343 switch (sync_state) { 2344 case PFSYNC_S_INS: 2345 switch (sc->sc_version) { 2346 case PFSYNC_MSG_VERSION_1301: 2347 return PFSYNC_Q_INS_1301; 2348 case PFSYNC_MSG_VERSION_1400: 2349 return PFSYNC_Q_INS_1400; 2350 } 2351 break; 2352 case PFSYNC_S_IACK: 2353 return PFSYNC_Q_IACK; 2354 case PFSYNC_S_UPD: 2355 switch (sc->sc_version) { 2356 case PFSYNC_MSG_VERSION_1301: 2357 return PFSYNC_Q_UPD_1301; 2358 case PFSYNC_MSG_VERSION_1400: 2359 return PFSYNC_Q_UPD_1400; 2360 } 2361 break; 2362 case PFSYNC_S_UPD_C: 2363 return PFSYNC_Q_UPD_C; 2364 case PFSYNC_S_DEL_C: 2365 return PFSYNC_Q_DEL_C; 2366 default: 2367 panic("%s: Unsupported st->sync_state 0x%02x", 2368 __func__, sync_state); 2369 } 2370 2371 panic("%s: Unsupported pfsync_msg_version %d", 2372 __func__, sc->sc_version); 2373 } 2374 2375 static void 2376 pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref) 2377 { 2378 enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state); 2379 struct pfsync_softc *sc = V_pfsyncif; 2380 size_t nlen = pfsync_qs[q].len; 2381 struct pfsync_bucket *b = pfsync_get_bucket(sc, st); 2382 2383 PFSYNC_BUCKET_LOCK_ASSERT(b); 2384 2385 KASSERT(st->sync_state == PFSYNC_S_NONE, 2386 ("%s: st->sync_state %u", __func__, st->sync_state)); 2387 KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu", 2388 b->b_len)); 2389 2390 if (TAILQ_EMPTY(&b->b_qs[q])) 2391 nlen += sizeof(struct pfsync_subheader); 2392 2393 if (b->b_len + nlen > sc->sc_ifp->if_mtu) { 2394 pfsync_sendout(1, b->b_id); 2395 2396 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; 2397 } 2398 2399 b->b_len += nlen; 2400 TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list); 2401 st->sync_state = pfsync_qid_sstate[q]; 2402 if (ref) 2403 pf_ref_state(st); 2404 } 2405 2406 static void 2407 pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b) 2408 { 2409 enum pfsync_q_id q; 2410 2411 PFSYNC_BUCKET_LOCK_ASSERT(b); 2412 KASSERT(st->sync_state != PFSYNC_S_NONE, 2413 ("%s: st->sync_state != PFSYNC_S_NONE", __func__)); 2414 2415 q = pfsync_sstate_to_qid(st->sync_state); 2416 b->b_len -= pfsync_qs[q].len; 2417 TAILQ_REMOVE(&b->b_qs[q], st, sync_list); 2418 st->sync_state = PFSYNC_S_NONE; 2419 if (unref) 2420 pf_release_state(st); 2421 2422 if (TAILQ_EMPTY(&b->b_qs[q])) 2423 b->b_len -= sizeof(struct pfsync_subheader); 2424 } 2425 2426 static void 2427 pfsync_bulk_start(void) 2428 { 2429 struct pfsync_softc *sc = V_pfsyncif; 2430 2431 if (V_pf_status.debug >= PF_DEBUG_MISC) 2432 printf("pfsync: received bulk update request\n"); 2433 2434 PFSYNC_BLOCK(sc); 2435 2436 sc->sc_ureq_received = time_uptime; 2437 sc->sc_bulk_hashid = 0; 2438 sc->sc_bulk_stateid = 0; 2439 pfsync_bulk_status(PFSYNC_BUS_START); 2440 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); 2441 PFSYNC_BUNLOCK(sc); 2442 } 2443 2444 static void 2445 pfsync_bulk_update(void *arg) 2446 { 2447 struct pfsync_softc *sc = arg; 2448 struct pf_kstate *s; 2449 int i; 2450 2451 PFSYNC_BLOCK_ASSERT(sc); 2452 CURVNET_SET(sc->sc_ifp->if_vnet); 2453 2454 /* 2455 * Start with last state from previous invocation. 2456 * It may had gone, in this case start from the 2457 * hash slot. 2458 */ 2459 s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid); 2460 2461 if (s != NULL) 2462 i = PF_IDHASH(s); 2463 else 2464 i = sc->sc_bulk_hashid; 2465 2466 for (; i <= pf_hashmask; i++) { 2467 struct pf_idhash *ih = &V_pf_idhash[i]; 2468 2469 if (s != NULL) 2470 PF_HASHROW_ASSERT(ih); 2471 else { 2472 PF_HASHROW_LOCK(ih); 2473 s = LIST_FIRST(&ih->states); 2474 } 2475 2476 for (; s; s = LIST_NEXT(s, entry)) { 2477 if (s->sync_state == PFSYNC_S_NONE && 2478 s->timeout < PFTM_MAX && 2479 s->pfsync_time <= sc->sc_ureq_received) { 2480 if (pfsync_update_state_req(s)) { 2481 /* We've filled a packet. */ 2482 sc->sc_bulk_hashid = i; 2483 sc->sc_bulk_stateid = s->id; 2484 sc->sc_bulk_creatorid = s->creatorid; 2485 PF_HASHROW_UNLOCK(ih); 2486 callout_reset(&sc->sc_bulk_tmo, 1, 2487 pfsync_bulk_update, sc); 2488 goto full; 2489 } 2490 } 2491 } 2492 PF_HASHROW_UNLOCK(ih); 2493 } 2494 2495 /* We're done. */ 2496 pfsync_bulk_status(PFSYNC_BUS_END); 2497 full: 2498 CURVNET_RESTORE(); 2499 } 2500 2501 static void 2502 pfsync_bulk_status(u_int8_t status) 2503 { 2504 struct { 2505 struct pfsync_subheader subh; 2506 struct pfsync_bus bus; 2507 } __packed r; 2508 2509 struct pfsync_softc *sc = V_pfsyncif; 2510 2511 bzero(&r, sizeof(r)); 2512 2513 r.subh.action = PFSYNC_ACT_BUS; 2514 r.subh.count = htons(1); 2515 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++; 2516 2517 r.bus.creatorid = V_pf_status.hostid; 2518 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); 2519 r.bus.status = status; 2520 2521 pfsync_send_plus(&r, sizeof(r)); 2522 } 2523 2524 static void 2525 pfsync_bulk_fail(void *arg) 2526 { 2527 struct pfsync_softc *sc = arg; 2528 struct pfsync_bucket *b = &sc->sc_buckets[0]; 2529 2530 CURVNET_SET(sc->sc_ifp->if_vnet); 2531 2532 PFSYNC_BLOCK_ASSERT(sc); 2533 2534 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { 2535 /* Try again */ 2536 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 2537 pfsync_bulk_fail, V_pfsyncif); 2538 PFSYNC_BUCKET_LOCK(b); 2539 pfsync_request_update(0, 0); 2540 PFSYNC_BUCKET_UNLOCK(b); 2541 } else { 2542 /* Pretend like the transfer was ok. */ 2543 sc->sc_ureq_sent = 0; 2544 sc->sc_bulk_tries = 0; 2545 PFSYNC_LOCK(sc); 2546 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 2547 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 2548 "pfsync bulk fail"); 2549 sc->sc_flags |= PFSYNCF_OK; 2550 PFSYNC_UNLOCK(sc); 2551 if (V_pf_status.debug >= PF_DEBUG_MISC) 2552 printf("pfsync: failed to receive bulk update\n"); 2553 } 2554 2555 CURVNET_RESTORE(); 2556 } 2557 2558 static void 2559 pfsync_send_plus(void *plus, size_t pluslen) 2560 { 2561 struct pfsync_softc *sc = V_pfsyncif; 2562 struct pfsync_bucket *b = &sc->sc_buckets[0]; 2563 2564 PFSYNC_BUCKET_LOCK(b); 2565 2566 if (b->b_len + pluslen > sc->sc_ifp->if_mtu) 2567 pfsync_sendout(1, b->b_id); 2568 2569 b->b_plus = plus; 2570 b->b_len += (b->b_pluslen = pluslen); 2571 2572 pfsync_sendout(1, b->b_id); 2573 PFSYNC_BUCKET_UNLOCK(b); 2574 } 2575 2576 static void 2577 pfsync_timeout(void *arg) 2578 { 2579 struct pfsync_bucket *b = arg; 2580 2581 CURVNET_SET(b->b_sc->sc_ifp->if_vnet); 2582 PFSYNC_BUCKET_LOCK(b); 2583 pfsync_push(b); 2584 PFSYNC_BUCKET_UNLOCK(b); 2585 CURVNET_RESTORE(); 2586 } 2587 2588 static void 2589 pfsync_push(struct pfsync_bucket *b) 2590 { 2591 2592 PFSYNC_BUCKET_LOCK_ASSERT(b); 2593 2594 b->b_flags |= PFSYNCF_BUCKET_PUSH; 2595 swi_sched(V_pfsync_swi_cookie, 0); 2596 } 2597 2598 static void 2599 pfsync_push_all(struct pfsync_softc *sc) 2600 { 2601 int c; 2602 struct pfsync_bucket *b; 2603 2604 for (c = 0; c < pfsync_buckets; c++) { 2605 b = &sc->sc_buckets[c]; 2606 2607 PFSYNC_BUCKET_LOCK(b); 2608 pfsync_push(b); 2609 PFSYNC_BUCKET_UNLOCK(b); 2610 } 2611 } 2612 2613 static void 2614 pfsync_tx(struct pfsync_softc *sc, struct mbuf *m) 2615 { 2616 struct ip *ip; 2617 int af, error = 0; 2618 2619 ip = mtod(m, struct ip *); 2620 MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4)); 2621 2622 af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6; 2623 2624 /* 2625 * We distinguish between a deferral packet and our 2626 * own pfsync packet based on M_SKIP_FIREWALL 2627 * flag. This is XXX. 2628 */ 2629 switch (af) { 2630 #ifdef INET 2631 case AF_INET: 2632 if (m->m_flags & M_SKIP_FIREWALL) { 2633 error = ip_output(m, NULL, NULL, 0, 2634 NULL, NULL); 2635 } else { 2636 error = ip_output(m, NULL, NULL, 2637 IP_RAWOUTPUT, &sc->sc_imo, NULL); 2638 } 2639 break; 2640 #endif 2641 #ifdef INET6 2642 case AF_INET6: 2643 if (m->m_flags & M_SKIP_FIREWALL) { 2644 error = ip6_output(m, NULL, NULL, 0, 2645 NULL, NULL, NULL); 2646 } else { 2647 error = ip6_output(m, NULL, NULL, 0, 2648 &sc->sc_im6o, NULL, NULL); 2649 } 2650 break; 2651 #endif 2652 } 2653 2654 if (error == 0) 2655 V_pfsyncstats.pfsyncs_opackets++; 2656 else 2657 V_pfsyncstats.pfsyncs_oerrors++; 2658 2659 } 2660 2661 static void 2662 pfsyncintr(void *arg) 2663 { 2664 struct epoch_tracker et; 2665 struct pfsync_softc *sc = arg; 2666 struct pfsync_bucket *b; 2667 struct mbuf *m, *n; 2668 int c; 2669 2670 NET_EPOCH_ENTER(et); 2671 CURVNET_SET(sc->sc_ifp->if_vnet); 2672 2673 for (c = 0; c < pfsync_buckets; c++) { 2674 b = &sc->sc_buckets[c]; 2675 2676 PFSYNC_BUCKET_LOCK(b); 2677 if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) { 2678 pfsync_sendout(0, b->b_id); 2679 b->b_flags &= ~PFSYNCF_BUCKET_PUSH; 2680 } 2681 _IF_DEQUEUE_ALL(&b->b_snd, m); 2682 PFSYNC_BUCKET_UNLOCK(b); 2683 2684 for (; m != NULL; m = n) { 2685 n = m->m_nextpkt; 2686 m->m_nextpkt = NULL; 2687 2688 pfsync_tx(sc, m); 2689 } 2690 } 2691 CURVNET_RESTORE(); 2692 NET_EPOCH_EXIT(et); 2693 } 2694 2695 static int 2696 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, 2697 struct in_mfilter* imf, struct in6_mfilter* im6f) 2698 { 2699 #ifdef INET 2700 struct ip_moptions *imo = &sc->sc_imo; 2701 #endif 2702 #ifdef INET6 2703 struct ip6_moptions *im6o = &sc->sc_im6o; 2704 struct sockaddr_in6 *syncpeer_sa6 = NULL; 2705 #endif 2706 2707 if (!(ifp->if_flags & IFF_MULTICAST)) 2708 return (EADDRNOTAVAIL); 2709 2710 switch (sc->sc_sync_peer.ss_family) { 2711 #ifdef INET 2712 case AF_INET: 2713 { 2714 int error; 2715 2716 ip_mfilter_init(&imo->imo_head); 2717 imo->imo_multicast_vif = -1; 2718 if ((error = in_joingroup(ifp, 2719 &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL, 2720 &imf->imf_inm)) != 0) 2721 return (error); 2722 2723 ip_mfilter_insert(&imo->imo_head, imf); 2724 imo->imo_multicast_ifp = ifp; 2725 imo->imo_multicast_ttl = PFSYNC_DFLTTL; 2726 imo->imo_multicast_loop = 0; 2727 break; 2728 } 2729 #endif 2730 #ifdef INET6 2731 case AF_INET6: 2732 { 2733 int error; 2734 2735 syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer; 2736 if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL))) 2737 return (error); 2738 2739 ip6_mfilter_init(&im6o->im6o_head); 2740 if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL, 2741 &(im6f->im6f_in6m), 0)) != 0) 2742 return (error); 2743 2744 ip6_mfilter_insert(&im6o->im6o_head, im6f); 2745 im6o->im6o_multicast_ifp = ifp; 2746 im6o->im6o_multicast_hlim = PFSYNC_DFLTTL; 2747 im6o->im6o_multicast_loop = 0; 2748 break; 2749 } 2750 #endif 2751 } 2752 2753 return (0); 2754 } 2755 2756 static void 2757 pfsync_multicast_cleanup(struct pfsync_softc *sc) 2758 { 2759 #ifdef INET 2760 struct ip_moptions *imo = &sc->sc_imo; 2761 struct in_mfilter *imf; 2762 2763 while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) { 2764 ip_mfilter_remove(&imo->imo_head, imf); 2765 in_leavegroup(imf->imf_inm, NULL); 2766 ip_mfilter_free(imf); 2767 } 2768 imo->imo_multicast_ifp = NULL; 2769 #endif 2770 2771 #ifdef INET6 2772 struct ip6_moptions *im6o = &sc->sc_im6o; 2773 struct in6_mfilter *im6f; 2774 2775 while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) { 2776 ip6_mfilter_remove(&im6o->im6o_head, im6f); 2777 in6_leavegroup(im6f->im6f_in6m, NULL); 2778 ip6_mfilter_free(im6f); 2779 } 2780 im6o->im6o_multicast_ifp = NULL; 2781 #endif 2782 } 2783 2784 void 2785 pfsync_detach_ifnet(struct ifnet *ifp) 2786 { 2787 struct pfsync_softc *sc = V_pfsyncif; 2788 2789 if (sc == NULL) 2790 return; 2791 2792 PFSYNC_LOCK(sc); 2793 2794 if (sc->sc_sync_if == ifp) { 2795 /* We don't need mutlicast cleanup here, because the interface 2796 * is going away. We do need to ensure we don't try to do 2797 * cleanup later. 2798 */ 2799 ip_mfilter_init(&sc->sc_imo.imo_head); 2800 sc->sc_imo.imo_multicast_ifp = NULL; 2801 sc->sc_im6o.im6o_multicast_ifp = NULL; 2802 sc->sc_sync_if = NULL; 2803 } 2804 2805 PFSYNC_UNLOCK(sc); 2806 } 2807 2808 static int 2809 pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status) 2810 { 2811 struct sockaddr_storage sa; 2812 status->maxupdates = pfsyncr->pfsyncr_maxupdates; 2813 status->flags = pfsyncr->pfsyncr_defer; 2814 2815 strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ); 2816 2817 memset(&sa, 0, sizeof(sa)); 2818 if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) { 2819 struct sockaddr_in *in = (struct sockaddr_in *)&sa; 2820 in->sin_family = AF_INET; 2821 in->sin_len = sizeof(*in); 2822 in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr; 2823 } 2824 status->syncpeer = sa; 2825 2826 return 0; 2827 } 2828 2829 static int 2830 pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc) 2831 { 2832 struct ifnet *sifp; 2833 struct in_mfilter *imf = NULL; 2834 struct in6_mfilter *im6f = NULL; 2835 int error; 2836 int c; 2837 2838 if ((status->maxupdates < 0) || (status->maxupdates > 255)) 2839 return (EINVAL); 2840 2841 if (status->syncdev[0] == '\0') 2842 sifp = NULL; 2843 else if ((sifp = ifunit_ref(status->syncdev)) == NULL) 2844 return (EINVAL); 2845 2846 switch (status->syncpeer.ss_family) { 2847 #ifdef INET 2848 case AF_UNSPEC: 2849 case AF_INET: { 2850 struct sockaddr_in *status_sin; 2851 status_sin = (struct sockaddr_in *)&(status->syncpeer); 2852 if (sifp != NULL) { 2853 if (status_sin->sin_addr.s_addr == 0 || 2854 status_sin->sin_addr.s_addr == 2855 htonl(INADDR_PFSYNC_GROUP)) { 2856 status_sin->sin_family = AF_INET; 2857 status_sin->sin_len = sizeof(*status_sin); 2858 status_sin->sin_addr.s_addr = 2859 htonl(INADDR_PFSYNC_GROUP); 2860 } 2861 2862 if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) { 2863 imf = ip_mfilter_alloc(M_WAITOK, 0, 0); 2864 } 2865 } 2866 break; 2867 } 2868 #endif 2869 #ifdef INET6 2870 case AF_INET6: { 2871 struct sockaddr_in6 *status_sin6; 2872 status_sin6 = (struct sockaddr_in6*)&(status->syncpeer); 2873 if (sifp != NULL) { 2874 if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) || 2875 IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr, 2876 &in6addr_linklocal_pfsync_group)) { 2877 status_sin6->sin6_family = AF_INET6; 2878 status_sin6->sin6_len = sizeof(*status_sin6); 2879 status_sin6->sin6_addr = 2880 in6addr_linklocal_pfsync_group; 2881 } 2882 2883 if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) { 2884 im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0); 2885 } 2886 } 2887 break; 2888 } 2889 #endif 2890 } 2891 2892 PFSYNC_LOCK(sc); 2893 2894 switch (status->version) { 2895 case PFSYNC_MSG_VERSION_UNSPECIFIED: 2896 sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT; 2897 break; 2898 case PFSYNC_MSG_VERSION_1301: 2899 case PFSYNC_MSG_VERSION_1400: 2900 sc->sc_version = status->version; 2901 break; 2902 default: 2903 PFSYNC_UNLOCK(sc); 2904 return (EINVAL); 2905 } 2906 2907 switch (status->syncpeer.ss_family) { 2908 case AF_INET: { 2909 struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer); 2910 struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer; 2911 sc_sin->sin_family = AF_INET; 2912 sc_sin->sin_len = sizeof(*sc_sin); 2913 if (status_sin->sin_addr.s_addr == 0) { 2914 sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP); 2915 } else { 2916 sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr; 2917 } 2918 break; 2919 } 2920 case AF_INET6: { 2921 struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer); 2922 struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer; 2923 sc_sin->sin6_family = AF_INET6; 2924 sc_sin->sin6_len = sizeof(*sc_sin); 2925 if(IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) { 2926 sc_sin->sin6_addr = in6addr_linklocal_pfsync_group; 2927 } else { 2928 sc_sin->sin6_addr = status_sin->sin6_addr; 2929 } 2930 break; 2931 } 2932 } 2933 2934 sc->sc_maxupdates = status->maxupdates; 2935 if (status->flags & PFSYNCF_DEFER) { 2936 sc->sc_flags |= PFSYNCF_DEFER; 2937 V_pfsync_defer_ptr = pfsync_defer; 2938 } else { 2939 sc->sc_flags &= ~PFSYNCF_DEFER; 2940 V_pfsync_defer_ptr = NULL; 2941 } 2942 2943 if (sifp == NULL) { 2944 if (sc->sc_sync_if) 2945 if_rele(sc->sc_sync_if); 2946 sc->sc_sync_if = NULL; 2947 pfsync_multicast_cleanup(sc); 2948 PFSYNC_UNLOCK(sc); 2949 return (0); 2950 } 2951 2952 for (c = 0; c < pfsync_buckets; c++) { 2953 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]); 2954 if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT && 2955 (sifp->if_mtu < sc->sc_ifp->if_mtu || 2956 (sc->sc_sync_if != NULL && 2957 sifp->if_mtu < sc->sc_sync_if->if_mtu) || 2958 sifp->if_mtu < MCLBYTES - sizeof(struct ip))) 2959 pfsync_sendout(1, c); 2960 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]); 2961 } 2962 2963 pfsync_multicast_cleanup(sc); 2964 2965 if (((sc->sc_sync_peer.ss_family == AF_INET) && 2966 IN_MULTICAST(ntohl(((struct sockaddr_in *) 2967 &sc->sc_sync_peer)->sin_addr.s_addr))) || 2968 ((sc->sc_sync_peer.ss_family == AF_INET6) && 2969 IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6*) 2970 &sc->sc_sync_peer)->sin6_addr))) { 2971 error = pfsync_multicast_setup(sc, sifp, imf, im6f); 2972 if (error) { 2973 if_rele(sifp); 2974 PFSYNC_UNLOCK(sc); 2975 #ifdef INET 2976 if (imf != NULL) 2977 ip_mfilter_free(imf); 2978 #endif 2979 #ifdef INET6 2980 if (im6f != NULL) 2981 ip6_mfilter_free(im6f); 2982 #endif 2983 return (error); 2984 } 2985 } 2986 if (sc->sc_sync_if) 2987 if_rele(sc->sc_sync_if); 2988 sc->sc_sync_if = sifp; 2989 2990 switch (sc->sc_sync_peer.ss_family) { 2991 #ifdef INET 2992 case AF_INET: { 2993 struct ip *ip; 2994 ip = &sc->sc_template.ipv4; 2995 bzero(ip, sizeof(*ip)); 2996 ip->ip_v = IPVERSION; 2997 ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2; 2998 ip->ip_tos = IPTOS_LOWDELAY; 2999 /* len and id are set later. */ 3000 ip->ip_off = htons(IP_DF); 3001 ip->ip_ttl = PFSYNC_DFLTTL; 3002 ip->ip_p = IPPROTO_PFSYNC; 3003 ip->ip_src.s_addr = INADDR_ANY; 3004 ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr; 3005 break; 3006 } 3007 #endif 3008 #ifdef INET6 3009 case AF_INET6: { 3010 struct ip6_hdr *ip6; 3011 ip6 = &sc->sc_template.ipv6; 3012 bzero(ip6, sizeof(*ip6)); 3013 ip6->ip6_vfc = IPV6_VERSION; 3014 ip6->ip6_hlim = PFSYNC_DFLTTL; 3015 ip6->ip6_nxt = IPPROTO_PFSYNC; 3016 ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr; 3017 3018 struct epoch_tracker et; 3019 NET_EPOCH_ENTER(et); 3020 in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0, 3021 sc->sc_sync_if, &ip6->ip6_src, NULL); 3022 NET_EPOCH_EXIT(et); 3023 break; 3024 } 3025 #endif 3026 } 3027 3028 /* Request a full state table update. */ 3029 if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 3030 (*carp_demote_adj_p)(V_pfsync_carp_adj, 3031 "pfsync bulk start"); 3032 sc->sc_flags &= ~PFSYNCF_OK; 3033 if (V_pf_status.debug >= PF_DEBUG_MISC) 3034 printf("pfsync: requesting bulk update\n"); 3035 PFSYNC_UNLOCK(sc); 3036 PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]); 3037 pfsync_request_update(0, 0); 3038 PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]); 3039 PFSYNC_BLOCK(sc); 3040 sc->sc_ureq_sent = time_uptime; 3041 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc); 3042 PFSYNC_BUNLOCK(sc); 3043 return (0); 3044 } 3045 3046 static void 3047 pfsync_pointers_init(void) 3048 { 3049 3050 PF_RULES_WLOCK(); 3051 V_pfsync_state_import_ptr = pfsync_state_import; 3052 V_pfsync_insert_state_ptr = pfsync_insert_state; 3053 V_pfsync_update_state_ptr = pfsync_update_state; 3054 V_pfsync_delete_state_ptr = pfsync_delete_state; 3055 V_pfsync_clear_states_ptr = pfsync_clear_states; 3056 V_pfsync_defer_ptr = pfsync_defer; 3057 PF_RULES_WUNLOCK(); 3058 } 3059 3060 static void 3061 pfsync_pointers_uninit(void) 3062 { 3063 3064 PF_RULES_WLOCK(); 3065 V_pfsync_state_import_ptr = NULL; 3066 V_pfsync_insert_state_ptr = NULL; 3067 V_pfsync_update_state_ptr = NULL; 3068 V_pfsync_delete_state_ptr = NULL; 3069 V_pfsync_clear_states_ptr = NULL; 3070 V_pfsync_defer_ptr = NULL; 3071 PF_RULES_WUNLOCK(); 3072 } 3073 3074 static void 3075 vnet_pfsync_init(const void *unused __unused) 3076 { 3077 int error; 3078 3079 V_pfsync_cloner = if_clone_simple(pfsyncname, 3080 pfsync_clone_create, pfsync_clone_destroy, 1); 3081 error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif, 3082 SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie); 3083 if (error) { 3084 if_clone_detach(V_pfsync_cloner); 3085 log(LOG_INFO, "swi_add() failed in %s\n", __func__); 3086 } 3087 3088 pfsync_pointers_init(); 3089 } 3090 VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY, 3091 vnet_pfsync_init, NULL); 3092 3093 static void 3094 vnet_pfsync_uninit(const void *unused __unused) 3095 { 3096 int ret __diagused; 3097 3098 pfsync_pointers_uninit(); 3099 3100 if_clone_detach(V_pfsync_cloner); 3101 ret = swi_remove(V_pfsync_swi_cookie); 3102 MPASS(ret == 0); 3103 ret = intr_event_destroy(V_pfsync_swi_ie); 3104 MPASS(ret == 0); 3105 } 3106 3107 VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH, 3108 vnet_pfsync_uninit, NULL); 3109 3110 static int 3111 pfsync_init(void) 3112 { 3113 int error; 3114 3115 pfsync_detach_ifnet_ptr = pfsync_detach_ifnet; 3116 3117 #ifdef INET 3118 error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL); 3119 if (error) 3120 return (error); 3121 #endif 3122 #ifdef INET6 3123 error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL); 3124 if (error) { 3125 ipproto_unregister(IPPROTO_PFSYNC); 3126 return (error); 3127 } 3128 #endif 3129 3130 return (0); 3131 } 3132 3133 static void 3134 pfsync_uninit(void) 3135 { 3136 pfsync_detach_ifnet_ptr = NULL; 3137 3138 #ifdef INET 3139 ipproto_unregister(IPPROTO_PFSYNC); 3140 #endif 3141 #ifdef INET6 3142 ip6proto_unregister(IPPROTO_PFSYNC); 3143 #endif 3144 } 3145 3146 static int 3147 pfsync_modevent(module_t mod, int type, void *data) 3148 { 3149 int error = 0; 3150 3151 switch (type) { 3152 case MOD_LOAD: 3153 error = pfsync_init(); 3154 break; 3155 case MOD_UNLOAD: 3156 pfsync_uninit(); 3157 break; 3158 default: 3159 error = EINVAL; 3160 break; 3161 } 3162 3163 return (error); 3164 } 3165 3166 static moduledata_t pfsync_mod = { 3167 pfsyncname, 3168 pfsync_modevent, 3169 0 3170 }; 3171 3172 #define PFSYNC_MODVER 1 3173 3174 /* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */ 3175 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); 3176 MODULE_VERSION(pfsync, PFSYNC_MODVER); 3177 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); 3178