1 /*- 2 * Copyright (c) 2002 Michael Shalayeff 3 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 25 * THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /*- 29 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 30 * 31 * Permission to use, copy, modify, and distribute this software for any 32 * purpose with or without fee is hereby granted, provided that the above 33 * copyright notice and this permission notice appear in all copies. 34 * 35 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 36 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 37 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 38 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 39 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 40 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 41 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 42 */ 43 44 /* 45 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ 46 * 47 * Revisions picked from OpenBSD after revision 1.110 import: 48 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input() 49 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates 50 * 1.120, 1.175 - use monotonic time_uptime 51 * 1.122 - reduce number of updates for non-TCP sessions 52 * 1.125, 1.127 - rewrite merge or stale processing 53 * 1.128 - cleanups 54 * 1.146 - bzero() mbuf before sparsely filling it with data 55 * 1.170 - SIOCSIFMTU checks 56 * 1.126, 1.142 - deferred packets processing 57 * 1.173 - correct expire time processing 58 */ 59 60 #include <sys/cdefs.h> 61 __FBSDID("$FreeBSD$"); 62 63 #include "opt_inet.h" 64 #include "opt_inet6.h" 65 #include "opt_pf.h" 66 67 #include <sys/param.h> 68 #include <sys/bus.h> 69 #include <sys/endian.h> 70 #include <sys/interrupt.h> 71 #include <sys/kernel.h> 72 #include <sys/lock.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/mutex.h> 76 #include <sys/priv.h> 77 #include <sys/protosw.h> 78 #include <sys/socket.h> 79 #include <sys/sockio.h> 80 #include <sys/sysctl.h> 81 82 #include <net/bpf.h> 83 #include <net/if.h> 84 #include <net/if_var.h> 85 #include <net/if_clone.h> 86 #include <net/if_types.h> 87 #include <net/vnet.h> 88 #include <net/pfvar.h> 89 #include <net/if_pfsync.h> 90 91 #include <netinet/if_ether.h> 92 #include <netinet/in.h> 93 #include <netinet/in_var.h> 94 #include <netinet/ip.h> 95 #include <netinet/ip_carp.h> 96 #include <netinet/ip_var.h> 97 #include <netinet/tcp.h> 98 #include <netinet/tcp_fsm.h> 99 #include <netinet/tcp_seq.h> 100 101 #define PFSYNC_MINPKT ( \ 102 sizeof(struct ip) + \ 103 sizeof(struct pfsync_header) + \ 104 sizeof(struct pfsync_subheader) ) 105 106 struct pfsync_pkt { 107 struct ip *ip; 108 struct in_addr src; 109 u_int8_t flags; 110 }; 111 112 static int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *, 113 struct pfsync_state_peer *); 114 static int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int); 115 static int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int); 116 static int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int); 117 static int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int); 118 static int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int); 119 static int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int); 120 static int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int); 121 static int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int); 122 static int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int); 123 static int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int); 124 static int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int); 125 static int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int); 126 127 static int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = { 128 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 129 pfsync_in_ins, /* PFSYNC_ACT_INS */ 130 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 131 pfsync_in_upd, /* PFSYNC_ACT_UPD */ 132 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ 133 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ 134 pfsync_in_del, /* PFSYNC_ACT_DEL */ 135 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ 136 pfsync_in_error, /* PFSYNC_ACT_INS_F */ 137 pfsync_in_error, /* PFSYNC_ACT_DEL_F */ 138 pfsync_in_bus, /* PFSYNC_ACT_BUS */ 139 pfsync_in_tdb, /* PFSYNC_ACT_TDB */ 140 pfsync_in_eof /* PFSYNC_ACT_EOF */ 141 }; 142 143 struct pfsync_q { 144 void (*write)(struct pf_state *, void *); 145 size_t len; 146 u_int8_t action; 147 }; 148 149 /* we have one of these for every PFSYNC_S_ */ 150 static void pfsync_out_state(struct pf_state *, void *); 151 static void pfsync_out_iack(struct pf_state *, void *); 152 static void pfsync_out_upd_c(struct pf_state *, void *); 153 static void pfsync_out_del(struct pf_state *, void *); 154 155 static struct pfsync_q pfsync_qs[] = { 156 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS }, 157 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, 158 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }, 159 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, 160 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } 161 }; 162 163 static void pfsync_q_ins(struct pf_state *, int); 164 static void pfsync_q_del(struct pf_state *); 165 166 static void pfsync_update_state(struct pf_state *); 167 168 struct pfsync_upd_req_item { 169 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; 170 struct pfsync_upd_req ur_msg; 171 }; 172 173 struct pfsync_deferral { 174 struct pfsync_softc *pd_sc; 175 TAILQ_ENTRY(pfsync_deferral) pd_entry; 176 u_int pd_refs; 177 struct callout pd_tmo; 178 179 struct pf_state *pd_st; 180 struct mbuf *pd_m; 181 }; 182 183 struct pfsync_softc { 184 /* Configuration */ 185 struct ifnet *sc_ifp; 186 struct ifnet *sc_sync_if; 187 struct ip_moptions sc_imo; 188 struct in_addr sc_sync_peer; 189 uint32_t sc_flags; 190 #define PFSYNCF_OK 0x00000001 191 #define PFSYNCF_DEFER 0x00000002 192 #define PFSYNCF_PUSH 0x00000004 193 uint8_t sc_maxupdates; 194 struct ip sc_template; 195 struct callout sc_tmo; 196 struct mtx sc_mtx; 197 198 /* Queued data */ 199 size_t sc_len; 200 TAILQ_HEAD(, pf_state) sc_qs[PFSYNC_S_COUNT]; 201 TAILQ_HEAD(, pfsync_upd_req_item) sc_upd_req_list; 202 TAILQ_HEAD(, pfsync_deferral) sc_deferrals; 203 u_int sc_deferred; 204 void *sc_plus; 205 size_t sc_pluslen; 206 207 /* Bulk update info */ 208 struct mtx sc_bulk_mtx; 209 uint32_t sc_ureq_sent; 210 int sc_bulk_tries; 211 uint32_t sc_ureq_received; 212 int sc_bulk_hashid; 213 uint64_t sc_bulk_stateid; 214 uint32_t sc_bulk_creatorid; 215 struct callout sc_bulk_tmo; 216 struct callout sc_bulkfail_tmo; 217 }; 218 219 #define PFSYNC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) 220 #define PFSYNC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) 221 #define PFSYNC_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) 222 223 #define PFSYNC_BLOCK(sc) mtx_lock(&(sc)->sc_bulk_mtx) 224 #define PFSYNC_BUNLOCK(sc) mtx_unlock(&(sc)->sc_bulk_mtx) 225 #define PFSYNC_BLOCK_ASSERT(sc) mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED) 226 227 static const char pfsyncname[] = "pfsync"; 228 static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data"); 229 static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL; 230 #define V_pfsyncif VNET(pfsyncif) 231 static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL; 232 #define V_pfsync_swi_cookie VNET(pfsync_swi_cookie) 233 static VNET_DEFINE(struct pfsyncstats, pfsyncstats); 234 #define V_pfsyncstats VNET(pfsyncstats) 235 static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW; 236 #define V_pfsync_carp_adj VNET(pfsync_carp_adj) 237 238 static void pfsync_timeout(void *); 239 static void pfsync_push(struct pfsync_softc *); 240 static void pfsyncintr(void *); 241 static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *, 242 void *); 243 static void pfsync_multicast_cleanup(struct pfsync_softc *); 244 static void pfsync_pointers_init(void); 245 static void pfsync_pointers_uninit(void); 246 static int pfsync_init(void); 247 static void pfsync_uninit(void); 248 249 SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC"); 250 SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW, 251 &VNET_NAME(pfsyncstats), pfsyncstats, 252 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); 253 SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW, 254 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); 255 256 static int pfsync_clone_create(struct if_clone *, int, caddr_t); 257 static void pfsync_clone_destroy(struct ifnet *); 258 static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, 259 struct pf_state_peer *); 260 static int pfsyncoutput(struct ifnet *, struct mbuf *, 261 const struct sockaddr *, struct route *); 262 static int pfsyncioctl(struct ifnet *, u_long, caddr_t); 263 264 static int pfsync_defer(struct pf_state *, struct mbuf *); 265 static void pfsync_undefer(struct pfsync_deferral *, int); 266 static void pfsync_undefer_state(struct pf_state *, int); 267 static void pfsync_defer_tmo(void *); 268 269 static void pfsync_request_update(u_int32_t, u_int64_t); 270 static void pfsync_update_state_req(struct pf_state *); 271 272 static void pfsync_drop(struct pfsync_softc *); 273 static void pfsync_sendout(int); 274 static void pfsync_send_plus(void *, size_t); 275 276 static void pfsync_bulk_start(void); 277 static void pfsync_bulk_status(u_int8_t); 278 static void pfsync_bulk_update(void *); 279 static void pfsync_bulk_fail(void *); 280 281 #ifdef IPSEC 282 static void pfsync_update_net_tdb(struct pfsync_tdb *); 283 #endif 284 285 #define PFSYNC_MAX_BULKTRIES 12 286 287 VNET_DEFINE(struct if_clone *, pfsync_cloner); 288 #define V_pfsync_cloner VNET(pfsync_cloner) 289 290 static int 291 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) 292 { 293 struct pfsync_softc *sc; 294 struct ifnet *ifp; 295 int q; 296 297 if (unit != 0) 298 return (EINVAL); 299 300 sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO); 301 sc->sc_flags |= PFSYNCF_OK; 302 303 for (q = 0; q < PFSYNC_S_COUNT; q++) 304 TAILQ_INIT(&sc->sc_qs[q]); 305 306 TAILQ_INIT(&sc->sc_upd_req_list); 307 TAILQ_INIT(&sc->sc_deferrals); 308 309 sc->sc_len = PFSYNC_MINPKT; 310 sc->sc_maxupdates = 128; 311 312 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); 313 if (ifp == NULL) { 314 free(sc, M_PFSYNC); 315 return (ENOSPC); 316 } 317 if_initname(ifp, pfsyncname, unit); 318 ifp->if_softc = sc; 319 ifp->if_ioctl = pfsyncioctl; 320 ifp->if_output = pfsyncoutput; 321 ifp->if_type = IFT_PFSYNC; 322 ifp->if_snd.ifq_maxlen = ifqmaxlen; 323 ifp->if_hdrlen = sizeof(struct pfsync_header); 324 ifp->if_mtu = ETHERMTU; 325 mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF); 326 mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF); 327 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE); 328 callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0); 329 callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0); 330 331 if_attach(ifp); 332 333 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 334 335 V_pfsyncif = sc; 336 337 return (0); 338 } 339 340 static void 341 pfsync_clone_destroy(struct ifnet *ifp) 342 { 343 struct pfsync_softc *sc = ifp->if_softc; 344 345 /* 346 * At this stage, everything should have already been 347 * cleared by pfsync_uninit(), and we have only to 348 * drain callouts. 349 */ 350 while (sc->sc_deferred > 0) { 351 struct pfsync_deferral *pd = TAILQ_FIRST(&sc->sc_deferrals); 352 353 TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry); 354 sc->sc_deferred--; 355 if (callout_stop(&pd->pd_tmo)) { 356 pf_release_state(pd->pd_st); 357 m_freem(pd->pd_m); 358 free(pd, M_PFSYNC); 359 } else { 360 pd->pd_refs++; 361 callout_drain(&pd->pd_tmo); 362 free(pd, M_PFSYNC); 363 } 364 } 365 366 callout_drain(&sc->sc_tmo); 367 callout_drain(&sc->sc_bulkfail_tmo); 368 callout_drain(&sc->sc_bulk_tmo); 369 370 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 371 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); 372 bpfdetach(ifp); 373 if_detach(ifp); 374 375 pfsync_drop(sc); 376 377 if_free(ifp); 378 if (sc->sc_imo.imo_membership) 379 pfsync_multicast_cleanup(sc); 380 mtx_destroy(&sc->sc_mtx); 381 mtx_destroy(&sc->sc_bulk_mtx); 382 free(sc, M_PFSYNC); 383 384 V_pfsyncif = NULL; 385 } 386 387 static int 388 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 389 struct pf_state_peer *d) 390 { 391 if (s->scrub.scrub_flag && d->scrub == NULL) { 392 d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); 393 if (d->scrub == NULL) 394 return (ENOMEM); 395 } 396 397 return (0); 398 } 399 400 401 static int 402 pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) 403 { 404 struct pfsync_softc *sc = V_pfsyncif; 405 struct pf_state *st = NULL; 406 struct pf_state_key *skw = NULL, *sks = NULL; 407 struct pf_rule *r = NULL; 408 struct pfi_kif *kif; 409 int error; 410 411 PF_RULES_RASSERT(); 412 413 if (sp->creatorid == 0) { 414 if (V_pf_status.debug >= PF_DEBUG_MISC) 415 printf("%s: invalid creator id: %08x\n", __func__, 416 ntohl(sp->creatorid)); 417 return (EINVAL); 418 } 419 420 if ((kif = pfi_kif_find(sp->ifname)) == NULL) { 421 if (V_pf_status.debug >= PF_DEBUG_MISC) 422 printf("%s: unknown interface: %s\n", __func__, 423 sp->ifname); 424 if (flags & PFSYNC_SI_IOCTL) 425 return (EINVAL); 426 return (0); /* skip this state */ 427 } 428 429 /* 430 * If the ruleset checksums match or the state is coming from the ioctl, 431 * it's safe to associate the state with the rule of that number. 432 */ 433 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 434 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 435 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 436 r = pf_main_ruleset.rules[ 437 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 438 else 439 r = &V_pf_default_rule; 440 441 if ((r->max_states && r->states_cur >= r->max_states)) 442 goto cleanup; 443 444 /* 445 * XXXGL: consider M_WAITOK in ioctl path after. 446 */ 447 if ((st = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO)) == NULL) 448 goto cleanup; 449 450 if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL) 451 goto cleanup; 452 453 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], 454 &sp->key[PF_SK_STACK].addr[0], sp->af) || 455 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], 456 &sp->key[PF_SK_STACK].addr[1], sp->af) || 457 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] || 458 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) { 459 sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 460 if (sks == NULL) 461 goto cleanup; 462 } else 463 sks = skw; 464 465 /* allocate memory for scrub info */ 466 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 467 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 468 goto cleanup; 469 470 /* copy to state key(s) */ 471 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; 472 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; 473 skw->port[0] = sp->key[PF_SK_WIRE].port[0]; 474 skw->port[1] = sp->key[PF_SK_WIRE].port[1]; 475 skw->proto = sp->proto; 476 skw->af = sp->af; 477 if (sks != skw) { 478 sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; 479 sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; 480 sks->port[0] = sp->key[PF_SK_STACK].port[0]; 481 sks->port[1] = sp->key[PF_SK_STACK].port[1]; 482 sks->proto = sp->proto; 483 sks->af = sp->af; 484 } 485 486 /* copy to state */ 487 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 488 st->creation = time_uptime - ntohl(sp->creation); 489 st->expire = time_uptime; 490 if (sp->expire) { 491 uint32_t timeout; 492 493 timeout = r->timeout[sp->timeout]; 494 if (!timeout) 495 timeout = V_pf_default_rule.timeout[sp->timeout]; 496 497 /* sp->expire may have been adaptively scaled by export. */ 498 st->expire -= timeout - ntohl(sp->expire); 499 } 500 501 st->direction = sp->direction; 502 st->log = sp->log; 503 st->timeout = sp->timeout; 504 st->state_flags = sp->state_flags; 505 506 st->id = sp->id; 507 st->creatorid = sp->creatorid; 508 pf_state_peer_ntoh(&sp->src, &st->src); 509 pf_state_peer_ntoh(&sp->dst, &st->dst); 510 511 st->rule.ptr = r; 512 st->nat_rule.ptr = NULL; 513 st->anchor.ptr = NULL; 514 st->rt_kif = NULL; 515 516 st->pfsync_time = time_uptime; 517 st->sync_state = PFSYNC_S_NONE; 518 519 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 520 r->states_cur++; 521 r->states_tot++; 522 523 if (!(flags & PFSYNC_SI_IOCTL)) 524 st->state_flags |= PFSTATE_NOSYNC; 525 526 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) { 527 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */ 528 r->states_cur--; 529 goto cleanup_state; 530 } 531 532 if (!(flags & PFSYNC_SI_IOCTL)) { 533 st->state_flags &= ~PFSTATE_NOSYNC; 534 if (st->state_flags & PFSTATE_ACK) { 535 pfsync_q_ins(st, PFSYNC_S_IACK); 536 pfsync_push(sc); 537 } 538 } 539 st->state_flags &= ~PFSTATE_ACK; 540 PF_STATE_UNLOCK(st); 541 542 return (0); 543 544 cleanup: 545 error = ENOMEM; 546 if (skw == sks) 547 sks = NULL; 548 if (skw != NULL) 549 uma_zfree(V_pf_state_key_z, skw); 550 if (sks != NULL) 551 uma_zfree(V_pf_state_key_z, sks); 552 553 cleanup_state: /* pf_state_insert() frees the state keys. */ 554 if (st) { 555 if (st->dst.scrub) 556 uma_zfree(V_pf_state_scrub_z, st->dst.scrub); 557 if (st->src.scrub) 558 uma_zfree(V_pf_state_scrub_z, st->src.scrub); 559 uma_zfree(V_pf_state_z, st); 560 } 561 return (error); 562 } 563 564 static void 565 pfsync_input(struct mbuf *m, __unused int off) 566 { 567 struct pfsync_softc *sc = V_pfsyncif; 568 struct pfsync_pkt pkt; 569 struct ip *ip = mtod(m, struct ip *); 570 struct pfsync_header *ph; 571 struct pfsync_subheader subh; 572 573 int offset, len; 574 int rv; 575 uint16_t count; 576 577 V_pfsyncstats.pfsyncs_ipackets++; 578 579 /* Verify that we have a sync interface configured. */ 580 if (!sc || !sc->sc_sync_if || !V_pf_status.running || 581 (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 582 goto done; 583 584 /* verify that the packet came in on the right interface */ 585 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 586 V_pfsyncstats.pfsyncs_badif++; 587 goto done; 588 } 589 590 sc->sc_ifp->if_ipackets++; 591 sc->sc_ifp->if_ibytes += m->m_pkthdr.len; 592 /* verify that the IP TTL is 255. */ 593 if (ip->ip_ttl != PFSYNC_DFLTTL) { 594 V_pfsyncstats.pfsyncs_badttl++; 595 goto done; 596 } 597 598 offset = ip->ip_hl << 2; 599 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 600 V_pfsyncstats.pfsyncs_hdrops++; 601 goto done; 602 } 603 604 if (offset + sizeof(*ph) > m->m_len) { 605 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 606 V_pfsyncstats.pfsyncs_hdrops++; 607 return; 608 } 609 ip = mtod(m, struct ip *); 610 } 611 ph = (struct pfsync_header *)((char *)ip + offset); 612 613 /* verify the version */ 614 if (ph->version != PFSYNC_VERSION) { 615 V_pfsyncstats.pfsyncs_badver++; 616 goto done; 617 } 618 619 len = ntohs(ph->len) + offset; 620 if (m->m_pkthdr.len < len) { 621 V_pfsyncstats.pfsyncs_badlen++; 622 goto done; 623 } 624 625 /* Cheaper to grab this now than having to mess with mbufs later */ 626 pkt.ip = ip; 627 pkt.src = ip->ip_src; 628 pkt.flags = 0; 629 630 /* 631 * Trusting pf_chksum during packet processing, as well as seeking 632 * in interface name tree, require holding PF_RULES_RLOCK(). 633 */ 634 PF_RULES_RLOCK(); 635 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 636 pkt.flags |= PFSYNC_SI_CKSUM; 637 638 offset += sizeof(*ph); 639 while (offset <= len - sizeof(subh)) { 640 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 641 offset += sizeof(subh); 642 643 if (subh.action >= PFSYNC_ACT_MAX) { 644 V_pfsyncstats.pfsyncs_badact++; 645 PF_RULES_RUNLOCK(); 646 goto done; 647 } 648 649 count = ntohs(subh.count); 650 V_pfsyncstats.pfsyncs_iacts[subh.action] += count; 651 rv = (*pfsync_acts[subh.action])(&pkt, m, offset, count); 652 if (rv == -1) { 653 PF_RULES_RUNLOCK(); 654 return; 655 } 656 657 offset += rv; 658 } 659 PF_RULES_RUNLOCK(); 660 661 done: 662 m_freem(m); 663 } 664 665 static int 666 pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 667 { 668 struct pfsync_clr *clr; 669 struct mbuf *mp; 670 int len = sizeof(*clr) * count; 671 int i, offp; 672 u_int32_t creatorid; 673 674 mp = m_pulldown(m, offset, len, &offp); 675 if (mp == NULL) { 676 V_pfsyncstats.pfsyncs_badlen++; 677 return (-1); 678 } 679 clr = (struct pfsync_clr *)(mp->m_data + offp); 680 681 for (i = 0; i < count; i++) { 682 creatorid = clr[i].creatorid; 683 684 if (clr[i].ifname[0] != '\0' && 685 pfi_kif_find(clr[i].ifname) == NULL) 686 continue; 687 688 for (int i = 0; i <= V_pf_hashmask; i++) { 689 struct pf_idhash *ih = &V_pf_idhash[i]; 690 struct pf_state *s; 691 relock: 692 PF_HASHROW_LOCK(ih); 693 LIST_FOREACH(s, &ih->states, entry) { 694 if (s->creatorid == creatorid) { 695 s->state_flags |= PFSTATE_NOSYNC; 696 pf_unlink_state(s, PF_ENTER_LOCKED); 697 goto relock; 698 } 699 } 700 PF_HASHROW_UNLOCK(ih); 701 } 702 } 703 704 return (len); 705 } 706 707 static int 708 pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 709 { 710 struct mbuf *mp; 711 struct pfsync_state *sa, *sp; 712 int len = sizeof(*sp) * count; 713 int i, offp; 714 715 mp = m_pulldown(m, offset, len, &offp); 716 if (mp == NULL) { 717 V_pfsyncstats.pfsyncs_badlen++; 718 return (-1); 719 } 720 sa = (struct pfsync_state *)(mp->m_data + offp); 721 722 for (i = 0; i < count; i++) { 723 sp = &sa[i]; 724 725 /* Check for invalid values. */ 726 if (sp->timeout >= PFTM_MAX || 727 sp->src.state > PF_TCPS_PROXY_DST || 728 sp->dst.state > PF_TCPS_PROXY_DST || 729 sp->direction > PF_OUT || 730 (sp->af != AF_INET && sp->af != AF_INET6)) { 731 if (V_pf_status.debug >= PF_DEBUG_MISC) 732 printf("%s: invalid value\n", __func__); 733 V_pfsyncstats.pfsyncs_badval++; 734 continue; 735 } 736 737 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) 738 /* Drop out, but process the rest of the actions. */ 739 break; 740 } 741 742 return (len); 743 } 744 745 static int 746 pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 747 { 748 struct pfsync_ins_ack *ia, *iaa; 749 struct pf_state *st; 750 751 struct mbuf *mp; 752 int len = count * sizeof(*ia); 753 int offp, i; 754 755 mp = m_pulldown(m, offset, len, &offp); 756 if (mp == NULL) { 757 V_pfsyncstats.pfsyncs_badlen++; 758 return (-1); 759 } 760 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 761 762 for (i = 0; i < count; i++) { 763 ia = &iaa[i]; 764 765 st = pf_find_state_byid(ia->id, ia->creatorid); 766 if (st == NULL) 767 continue; 768 769 if (st->state_flags & PFSTATE_ACK) { 770 PFSYNC_LOCK(V_pfsyncif); 771 pfsync_undefer_state(st, 0); 772 PFSYNC_UNLOCK(V_pfsyncif); 773 } 774 PF_STATE_UNLOCK(st); 775 } 776 /* 777 * XXX this is not yet implemented, but we know the size of the 778 * message so we can skip it. 779 */ 780 781 return (count * sizeof(struct pfsync_ins_ack)); 782 } 783 784 static int 785 pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src, 786 struct pfsync_state_peer *dst) 787 { 788 int sync = 0; 789 790 PF_STATE_LOCK_ASSERT(st); 791 792 /* 793 * The state should never go backwards except 794 * for syn-proxy states. Neither should the 795 * sequence window slide backwards. 796 */ 797 if ((st->src.state > src->state && 798 (st->src.state < PF_TCPS_PROXY_SRC || 799 src->state >= PF_TCPS_PROXY_SRC)) || 800 801 (st->src.state == src->state && 802 SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))) 803 sync++; 804 else 805 pf_state_peer_ntoh(src, &st->src); 806 807 if ((st->dst.state > dst->state) || 808 809 (st->dst.state >= TCPS_SYN_SENT && 810 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))) 811 sync++; 812 else 813 pf_state_peer_ntoh(dst, &st->dst); 814 815 return (sync); 816 } 817 818 static int 819 pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 820 { 821 struct pfsync_softc *sc = V_pfsyncif; 822 struct pfsync_state *sa, *sp; 823 struct pf_state *st; 824 int sync; 825 826 struct mbuf *mp; 827 int len = count * sizeof(*sp); 828 int offp, i; 829 830 mp = m_pulldown(m, offset, len, &offp); 831 if (mp == NULL) { 832 V_pfsyncstats.pfsyncs_badlen++; 833 return (-1); 834 } 835 sa = (struct pfsync_state *)(mp->m_data + offp); 836 837 for (i = 0; i < count; i++) { 838 sp = &sa[i]; 839 840 /* check for invalid values */ 841 if (sp->timeout >= PFTM_MAX || 842 sp->src.state > PF_TCPS_PROXY_DST || 843 sp->dst.state > PF_TCPS_PROXY_DST) { 844 if (V_pf_status.debug >= PF_DEBUG_MISC) { 845 printf("pfsync_input: PFSYNC_ACT_UPD: " 846 "invalid value\n"); 847 } 848 V_pfsyncstats.pfsyncs_badval++; 849 continue; 850 } 851 852 st = pf_find_state_byid(sp->id, sp->creatorid); 853 if (st == NULL) { 854 /* insert the update */ 855 if (pfsync_state_import(sp, 0)) 856 V_pfsyncstats.pfsyncs_badstate++; 857 continue; 858 } 859 860 if (st->state_flags & PFSTATE_ACK) { 861 PFSYNC_LOCK(sc); 862 pfsync_undefer_state(st, 1); 863 PFSYNC_UNLOCK(sc); 864 } 865 866 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 867 sync = pfsync_upd_tcp(st, &sp->src, &sp->dst); 868 else { 869 sync = 0; 870 871 /* 872 * Non-TCP protocol state machine always go 873 * forwards 874 */ 875 if (st->src.state > sp->src.state) 876 sync++; 877 else 878 pf_state_peer_ntoh(&sp->src, &st->src); 879 if (st->dst.state > sp->dst.state) 880 sync++; 881 else 882 pf_state_peer_ntoh(&sp->dst, &st->dst); 883 } 884 if (sync < 2) { 885 pfsync_alloc_scrub_memory(&sp->dst, &st->dst); 886 pf_state_peer_ntoh(&sp->dst, &st->dst); 887 st->expire = time_uptime; 888 st->timeout = sp->timeout; 889 } 890 st->pfsync_time = time_uptime; 891 892 if (sync) { 893 V_pfsyncstats.pfsyncs_stale++; 894 895 pfsync_update_state(st); 896 PF_STATE_UNLOCK(st); 897 PFSYNC_LOCK(sc); 898 pfsync_push(sc); 899 PFSYNC_UNLOCK(sc); 900 continue; 901 } 902 PF_STATE_UNLOCK(st); 903 } 904 905 return (len); 906 } 907 908 static int 909 pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 910 { 911 struct pfsync_softc *sc = V_pfsyncif; 912 struct pfsync_upd_c *ua, *up; 913 struct pf_state *st; 914 int len = count * sizeof(*up); 915 int sync; 916 struct mbuf *mp; 917 int offp, i; 918 919 mp = m_pulldown(m, offset, len, &offp); 920 if (mp == NULL) { 921 V_pfsyncstats.pfsyncs_badlen++; 922 return (-1); 923 } 924 ua = (struct pfsync_upd_c *)(mp->m_data + offp); 925 926 for (i = 0; i < count; i++) { 927 up = &ua[i]; 928 929 /* check for invalid values */ 930 if (up->timeout >= PFTM_MAX || 931 up->src.state > PF_TCPS_PROXY_DST || 932 up->dst.state > PF_TCPS_PROXY_DST) { 933 if (V_pf_status.debug >= PF_DEBUG_MISC) { 934 printf("pfsync_input: " 935 "PFSYNC_ACT_UPD_C: " 936 "invalid value\n"); 937 } 938 V_pfsyncstats.pfsyncs_badval++; 939 continue; 940 } 941 942 st = pf_find_state_byid(up->id, up->creatorid); 943 if (st == NULL) { 944 /* We don't have this state. Ask for it. */ 945 PFSYNC_LOCK(sc); 946 pfsync_request_update(up->creatorid, up->id); 947 PFSYNC_UNLOCK(sc); 948 continue; 949 } 950 951 if (st->state_flags & PFSTATE_ACK) { 952 PFSYNC_LOCK(sc); 953 pfsync_undefer_state(st, 1); 954 PFSYNC_UNLOCK(sc); 955 } 956 957 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) 958 sync = pfsync_upd_tcp(st, &up->src, &up->dst); 959 else { 960 sync = 0; 961 962 /* 963 * Non-TCP protocol state machine always go 964 * forwards 965 */ 966 if (st->src.state > up->src.state) 967 sync++; 968 else 969 pf_state_peer_ntoh(&up->src, &st->src); 970 if (st->dst.state > up->dst.state) 971 sync++; 972 else 973 pf_state_peer_ntoh(&up->dst, &st->dst); 974 } 975 if (sync < 2) { 976 pfsync_alloc_scrub_memory(&up->dst, &st->dst); 977 pf_state_peer_ntoh(&up->dst, &st->dst); 978 st->expire = time_uptime; 979 st->timeout = up->timeout; 980 } 981 st->pfsync_time = time_uptime; 982 983 if (sync) { 984 V_pfsyncstats.pfsyncs_stale++; 985 986 pfsync_update_state(st); 987 PF_STATE_UNLOCK(st); 988 PFSYNC_LOCK(sc); 989 pfsync_push(sc); 990 PFSYNC_UNLOCK(sc); 991 continue; 992 } 993 PF_STATE_UNLOCK(st); 994 } 995 996 return (len); 997 } 998 999 static int 1000 pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1001 { 1002 struct pfsync_upd_req *ur, *ura; 1003 struct mbuf *mp; 1004 int len = count * sizeof(*ur); 1005 int i, offp; 1006 1007 struct pf_state *st; 1008 1009 mp = m_pulldown(m, offset, len, &offp); 1010 if (mp == NULL) { 1011 V_pfsyncstats.pfsyncs_badlen++; 1012 return (-1); 1013 } 1014 ura = (struct pfsync_upd_req *)(mp->m_data + offp); 1015 1016 for (i = 0; i < count; i++) { 1017 ur = &ura[i]; 1018 1019 if (ur->id == 0 && ur->creatorid == 0) 1020 pfsync_bulk_start(); 1021 else { 1022 st = pf_find_state_byid(ur->id, ur->creatorid); 1023 if (st == NULL) { 1024 V_pfsyncstats.pfsyncs_badstate++; 1025 continue; 1026 } 1027 if (st->state_flags & PFSTATE_NOSYNC) { 1028 PF_STATE_UNLOCK(st); 1029 continue; 1030 } 1031 1032 pfsync_update_state_req(st); 1033 PF_STATE_UNLOCK(st); 1034 } 1035 } 1036 1037 return (len); 1038 } 1039 1040 static int 1041 pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1042 { 1043 struct mbuf *mp; 1044 struct pfsync_state *sa, *sp; 1045 struct pf_state *st; 1046 int len = count * sizeof(*sp); 1047 int offp, i; 1048 1049 mp = m_pulldown(m, offset, len, &offp); 1050 if (mp == NULL) { 1051 V_pfsyncstats.pfsyncs_badlen++; 1052 return (-1); 1053 } 1054 sa = (struct pfsync_state *)(mp->m_data + offp); 1055 1056 for (i = 0; i < count; i++) { 1057 sp = &sa[i]; 1058 1059 st = pf_find_state_byid(sp->id, sp->creatorid); 1060 if (st == NULL) { 1061 V_pfsyncstats.pfsyncs_badstate++; 1062 continue; 1063 } 1064 st->state_flags |= PFSTATE_NOSYNC; 1065 pf_unlink_state(st, PF_ENTER_LOCKED); 1066 } 1067 1068 return (len); 1069 } 1070 1071 static int 1072 pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1073 { 1074 struct mbuf *mp; 1075 struct pfsync_del_c *sa, *sp; 1076 struct pf_state *st; 1077 int len = count * sizeof(*sp); 1078 int offp, i; 1079 1080 mp = m_pulldown(m, offset, len, &offp); 1081 if (mp == NULL) { 1082 V_pfsyncstats.pfsyncs_badlen++; 1083 return (-1); 1084 } 1085 sa = (struct pfsync_del_c *)(mp->m_data + offp); 1086 1087 for (i = 0; i < count; i++) { 1088 sp = &sa[i]; 1089 1090 st = pf_find_state_byid(sp->id, sp->creatorid); 1091 if (st == NULL) { 1092 V_pfsyncstats.pfsyncs_badstate++; 1093 continue; 1094 } 1095 1096 st->state_flags |= PFSTATE_NOSYNC; 1097 pf_unlink_state(st, PF_ENTER_LOCKED); 1098 } 1099 1100 return (len); 1101 } 1102 1103 static int 1104 pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1105 { 1106 struct pfsync_softc *sc = V_pfsyncif; 1107 struct pfsync_bus *bus; 1108 struct mbuf *mp; 1109 int len = count * sizeof(*bus); 1110 int offp; 1111 1112 PFSYNC_BLOCK(sc); 1113 1114 /* If we're not waiting for a bulk update, who cares. */ 1115 if (sc->sc_ureq_sent == 0) { 1116 PFSYNC_BUNLOCK(sc); 1117 return (len); 1118 } 1119 1120 mp = m_pulldown(m, offset, len, &offp); 1121 if (mp == NULL) { 1122 PFSYNC_BUNLOCK(sc); 1123 V_pfsyncstats.pfsyncs_badlen++; 1124 return (-1); 1125 } 1126 bus = (struct pfsync_bus *)(mp->m_data + offp); 1127 1128 switch (bus->status) { 1129 case PFSYNC_BUS_START: 1130 callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + 1131 V_pf_limits[PF_LIMIT_STATES].limit / 1132 ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) / 1133 sizeof(struct pfsync_state)), 1134 pfsync_bulk_fail, sc); 1135 if (V_pf_status.debug >= PF_DEBUG_MISC) 1136 printf("pfsync: received bulk update start\n"); 1137 break; 1138 1139 case PFSYNC_BUS_END: 1140 if (time_uptime - ntohl(bus->endtime) >= 1141 sc->sc_ureq_sent) { 1142 /* that's it, we're happy */ 1143 sc->sc_ureq_sent = 0; 1144 sc->sc_bulk_tries = 0; 1145 callout_stop(&sc->sc_bulkfail_tmo); 1146 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 1147 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 1148 "pfsync bulk done"); 1149 sc->sc_flags |= PFSYNCF_OK; 1150 if (V_pf_status.debug >= PF_DEBUG_MISC) 1151 printf("pfsync: received valid " 1152 "bulk update end\n"); 1153 } else { 1154 if (V_pf_status.debug >= PF_DEBUG_MISC) 1155 printf("pfsync: received invalid " 1156 "bulk update end: bad timestamp\n"); 1157 } 1158 break; 1159 } 1160 PFSYNC_BUNLOCK(sc); 1161 1162 return (len); 1163 } 1164 1165 static int 1166 pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1167 { 1168 int len = count * sizeof(struct pfsync_tdb); 1169 1170 #if defined(IPSEC) 1171 struct pfsync_tdb *tp; 1172 struct mbuf *mp; 1173 int offp; 1174 int i; 1175 int s; 1176 1177 mp = m_pulldown(m, offset, len, &offp); 1178 if (mp == NULL) { 1179 V_pfsyncstats.pfsyncs_badlen++; 1180 return (-1); 1181 } 1182 tp = (struct pfsync_tdb *)(mp->m_data + offp); 1183 1184 for (i = 0; i < count; i++) 1185 pfsync_update_net_tdb(&tp[i]); 1186 #endif 1187 1188 return (len); 1189 } 1190 1191 #if defined(IPSEC) 1192 /* Update an in-kernel tdb. Silently fail if no tdb is found. */ 1193 static void 1194 pfsync_update_net_tdb(struct pfsync_tdb *pt) 1195 { 1196 struct tdb *tdb; 1197 int s; 1198 1199 /* check for invalid values */ 1200 if (ntohl(pt->spi) <= SPI_RESERVED_MAX || 1201 (pt->dst.sa.sa_family != AF_INET && 1202 pt->dst.sa.sa_family != AF_INET6)) 1203 goto bad; 1204 1205 tdb = gettdb(pt->spi, &pt->dst, pt->sproto); 1206 if (tdb) { 1207 pt->rpl = ntohl(pt->rpl); 1208 pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes); 1209 1210 /* Neither replay nor byte counter should ever decrease. */ 1211 if (pt->rpl < tdb->tdb_rpl || 1212 pt->cur_bytes < tdb->tdb_cur_bytes) { 1213 goto bad; 1214 } 1215 1216 tdb->tdb_rpl = pt->rpl; 1217 tdb->tdb_cur_bytes = pt->cur_bytes; 1218 } 1219 return; 1220 1221 bad: 1222 if (V_pf_status.debug >= PF_DEBUG_MISC) 1223 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: " 1224 "invalid value\n"); 1225 V_pfsyncstats.pfsyncs_badstate++; 1226 return; 1227 } 1228 #endif 1229 1230 1231 static int 1232 pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1233 { 1234 /* check if we are at the right place in the packet */ 1235 if (offset != m->m_pkthdr.len) 1236 V_pfsyncstats.pfsyncs_badlen++; 1237 1238 /* we're done. free and let the caller return */ 1239 m_freem(m); 1240 return (-1); 1241 } 1242 1243 static int 1244 pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1245 { 1246 V_pfsyncstats.pfsyncs_badact++; 1247 1248 m_freem(m); 1249 return (-1); 1250 } 1251 1252 static int 1253 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 1254 struct route *rt) 1255 { 1256 m_freem(m); 1257 return (0); 1258 } 1259 1260 /* ARGSUSED */ 1261 static int 1262 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1263 { 1264 struct pfsync_softc *sc = ifp->if_softc; 1265 struct ifreq *ifr = (struct ifreq *)data; 1266 struct pfsyncreq pfsyncr; 1267 int error; 1268 1269 switch (cmd) { 1270 case SIOCSIFFLAGS: 1271 PFSYNC_LOCK(sc); 1272 if (ifp->if_flags & IFF_UP) { 1273 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1274 PFSYNC_UNLOCK(sc); 1275 pfsync_pointers_init(); 1276 } else { 1277 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1278 PFSYNC_UNLOCK(sc); 1279 pfsync_pointers_uninit(); 1280 } 1281 break; 1282 case SIOCSIFMTU: 1283 if (!sc->sc_sync_if || 1284 ifr->ifr_mtu <= PFSYNC_MINPKT || 1285 ifr->ifr_mtu > sc->sc_sync_if->if_mtu) 1286 return (EINVAL); 1287 if (ifr->ifr_mtu < ifp->if_mtu) { 1288 PFSYNC_LOCK(sc); 1289 if (sc->sc_len > PFSYNC_MINPKT) 1290 pfsync_sendout(1); 1291 PFSYNC_UNLOCK(sc); 1292 } 1293 ifp->if_mtu = ifr->ifr_mtu; 1294 break; 1295 case SIOCGETPFSYNC: 1296 bzero(&pfsyncr, sizeof(pfsyncr)); 1297 PFSYNC_LOCK(sc); 1298 if (sc->sc_sync_if) { 1299 strlcpy(pfsyncr.pfsyncr_syncdev, 1300 sc->sc_sync_if->if_xname, IFNAMSIZ); 1301 } 1302 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer; 1303 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; 1304 pfsyncr.pfsyncr_defer = (PFSYNCF_DEFER == 1305 (sc->sc_flags & PFSYNCF_DEFER)); 1306 PFSYNC_UNLOCK(sc); 1307 return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))); 1308 1309 case SIOCSETPFSYNC: 1310 { 1311 struct ip_moptions *imo = &sc->sc_imo; 1312 struct ifnet *sifp; 1313 struct ip *ip; 1314 void *mship = NULL; 1315 1316 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) 1317 return (error); 1318 if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr)))) 1319 return (error); 1320 1321 if (pfsyncr.pfsyncr_maxupdates > 255) 1322 return (EINVAL); 1323 1324 if (pfsyncr.pfsyncr_syncdev[0] == 0) 1325 sifp = NULL; 1326 else if ((sifp = ifunit_ref(pfsyncr.pfsyncr_syncdev)) == NULL) 1327 return (EINVAL); 1328 1329 if (sifp != NULL && ( 1330 pfsyncr.pfsyncr_syncpeer.s_addr == 0 || 1331 pfsyncr.pfsyncr_syncpeer.s_addr == 1332 htonl(INADDR_PFSYNC_GROUP))) 1333 mship = malloc((sizeof(struct in_multi *) * 1334 IP_MIN_MEMBERSHIPS), M_PFSYNC, M_WAITOK | M_ZERO); 1335 1336 PFSYNC_LOCK(sc); 1337 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0) 1338 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP); 1339 else 1340 sc->sc_sync_peer.s_addr = 1341 pfsyncr.pfsyncr_syncpeer.s_addr; 1342 1343 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates; 1344 if (pfsyncr.pfsyncr_defer) { 1345 sc->sc_flags |= PFSYNCF_DEFER; 1346 pfsync_defer_ptr = pfsync_defer; 1347 } else { 1348 sc->sc_flags &= ~PFSYNCF_DEFER; 1349 pfsync_defer_ptr = NULL; 1350 } 1351 1352 if (sifp == NULL) { 1353 if (sc->sc_sync_if) 1354 if_rele(sc->sc_sync_if); 1355 sc->sc_sync_if = NULL; 1356 if (imo->imo_membership) 1357 pfsync_multicast_cleanup(sc); 1358 PFSYNC_UNLOCK(sc); 1359 break; 1360 } 1361 1362 if (sc->sc_len > PFSYNC_MINPKT && 1363 (sifp->if_mtu < sc->sc_ifp->if_mtu || 1364 (sc->sc_sync_if != NULL && 1365 sifp->if_mtu < sc->sc_sync_if->if_mtu) || 1366 sifp->if_mtu < MCLBYTES - sizeof(struct ip))) 1367 pfsync_sendout(1); 1368 1369 if (imo->imo_membership) 1370 pfsync_multicast_cleanup(sc); 1371 1372 if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) { 1373 error = pfsync_multicast_setup(sc, sifp, mship); 1374 if (error) { 1375 if_rele(sifp); 1376 free(mship, M_PFSYNC); 1377 return (error); 1378 } 1379 } 1380 if (sc->sc_sync_if) 1381 if_rele(sc->sc_sync_if); 1382 sc->sc_sync_if = sifp; 1383 1384 ip = &sc->sc_template; 1385 bzero(ip, sizeof(*ip)); 1386 ip->ip_v = IPVERSION; 1387 ip->ip_hl = sizeof(sc->sc_template) >> 2; 1388 ip->ip_tos = IPTOS_LOWDELAY; 1389 /* len and id are set later. */ 1390 ip->ip_off = htons(IP_DF); 1391 ip->ip_ttl = PFSYNC_DFLTTL; 1392 ip->ip_p = IPPROTO_PFSYNC; 1393 ip->ip_src.s_addr = INADDR_ANY; 1394 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr; 1395 1396 /* Request a full state table update. */ 1397 if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 1398 (*carp_demote_adj_p)(V_pfsync_carp_adj, 1399 "pfsync bulk start"); 1400 sc->sc_flags &= ~PFSYNCF_OK; 1401 if (V_pf_status.debug >= PF_DEBUG_MISC) 1402 printf("pfsync: requesting bulk update\n"); 1403 pfsync_request_update(0, 0); 1404 PFSYNC_UNLOCK(sc); 1405 PFSYNC_BLOCK(sc); 1406 sc->sc_ureq_sent = time_uptime; 1407 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, 1408 sc); 1409 PFSYNC_BUNLOCK(sc); 1410 1411 break; 1412 } 1413 default: 1414 return (ENOTTY); 1415 } 1416 1417 return (0); 1418 } 1419 1420 static void 1421 pfsync_out_state(struct pf_state *st, void *buf) 1422 { 1423 struct pfsync_state *sp = buf; 1424 1425 pfsync_state_export(sp, st); 1426 } 1427 1428 static void 1429 pfsync_out_iack(struct pf_state *st, void *buf) 1430 { 1431 struct pfsync_ins_ack *iack = buf; 1432 1433 iack->id = st->id; 1434 iack->creatorid = st->creatorid; 1435 } 1436 1437 static void 1438 pfsync_out_upd_c(struct pf_state *st, void *buf) 1439 { 1440 struct pfsync_upd_c *up = buf; 1441 1442 bzero(up, sizeof(*up)); 1443 up->id = st->id; 1444 pf_state_peer_hton(&st->src, &up->src); 1445 pf_state_peer_hton(&st->dst, &up->dst); 1446 up->creatorid = st->creatorid; 1447 up->timeout = st->timeout; 1448 } 1449 1450 static void 1451 pfsync_out_del(struct pf_state *st, void *buf) 1452 { 1453 struct pfsync_del_c *dp = buf; 1454 1455 dp->id = st->id; 1456 dp->creatorid = st->creatorid; 1457 st->state_flags |= PFSTATE_NOSYNC; 1458 } 1459 1460 static void 1461 pfsync_drop(struct pfsync_softc *sc) 1462 { 1463 struct pf_state *st, *next; 1464 struct pfsync_upd_req_item *ur; 1465 int q; 1466 1467 for (q = 0; q < PFSYNC_S_COUNT; q++) { 1468 if (TAILQ_EMPTY(&sc->sc_qs[q])) 1469 continue; 1470 1471 TAILQ_FOREACH_SAFE(st, &sc->sc_qs[q], sync_list, next) { 1472 KASSERT(st->sync_state == q, 1473 ("%s: st->sync_state == q", 1474 __func__)); 1475 st->sync_state = PFSYNC_S_NONE; 1476 pf_release_state(st); 1477 } 1478 TAILQ_INIT(&sc->sc_qs[q]); 1479 } 1480 1481 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) { 1482 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry); 1483 free(ur, M_PFSYNC); 1484 } 1485 1486 sc->sc_plus = NULL; 1487 sc->sc_len = PFSYNC_MINPKT; 1488 } 1489 1490 static void 1491 pfsync_sendout(int schedswi) 1492 { 1493 struct pfsync_softc *sc = V_pfsyncif; 1494 struct ifnet *ifp = sc->sc_ifp; 1495 struct mbuf *m; 1496 struct ip *ip; 1497 struct pfsync_header *ph; 1498 struct pfsync_subheader *subh; 1499 struct pf_state *st; 1500 struct pfsync_upd_req_item *ur; 1501 int offset; 1502 int q, count = 0; 1503 1504 KASSERT(sc != NULL, ("%s: null sc", __func__)); 1505 KASSERT(sc->sc_len > PFSYNC_MINPKT, 1506 ("%s: sc_len %zu", __func__, sc->sc_len)); 1507 PFSYNC_LOCK_ASSERT(sc); 1508 1509 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) { 1510 pfsync_drop(sc); 1511 return; 1512 } 1513 1514 m = m_get2(max_linkhdr + sc->sc_len, M_NOWAIT, MT_DATA, M_PKTHDR); 1515 if (m == NULL) { 1516 sc->sc_ifp->if_oerrors++; 1517 V_pfsyncstats.pfsyncs_onomem++; 1518 return; 1519 } 1520 m->m_data += max_linkhdr; 1521 m->m_len = m->m_pkthdr.len = sc->sc_len; 1522 1523 /* build the ip header */ 1524 ip = (struct ip *)m->m_data; 1525 bcopy(&sc->sc_template, ip, sizeof(*ip)); 1526 offset = sizeof(*ip); 1527 1528 ip->ip_len = htons(m->m_pkthdr.len); 1529 ip->ip_id = htons(ip_randomid()); 1530 1531 /* build the pfsync header */ 1532 ph = (struct pfsync_header *)(m->m_data + offset); 1533 bzero(ph, sizeof(*ph)); 1534 offset += sizeof(*ph); 1535 1536 ph->version = PFSYNC_VERSION; 1537 ph->len = htons(sc->sc_len - sizeof(*ip)); 1538 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); 1539 1540 /* walk the queues */ 1541 for (q = 0; q < PFSYNC_S_COUNT; q++) { 1542 if (TAILQ_EMPTY(&sc->sc_qs[q])) 1543 continue; 1544 1545 subh = (struct pfsync_subheader *)(m->m_data + offset); 1546 offset += sizeof(*subh); 1547 1548 count = 0; 1549 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) { 1550 KASSERT(st->sync_state == q, 1551 ("%s: st->sync_state == q", 1552 __func__)); 1553 /* 1554 * XXXGL: some of write methods do unlocked reads 1555 * of state data :( 1556 */ 1557 pfsync_qs[q].write(st, m->m_data + offset); 1558 offset += pfsync_qs[q].len; 1559 st->sync_state = PFSYNC_S_NONE; 1560 pf_release_state(st); 1561 count++; 1562 } 1563 TAILQ_INIT(&sc->sc_qs[q]); 1564 1565 bzero(subh, sizeof(*subh)); 1566 subh->action = pfsync_qs[q].action; 1567 subh->count = htons(count); 1568 V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count; 1569 } 1570 1571 if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) { 1572 subh = (struct pfsync_subheader *)(m->m_data + offset); 1573 offset += sizeof(*subh); 1574 1575 count = 0; 1576 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) { 1577 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry); 1578 1579 bcopy(&ur->ur_msg, m->m_data + offset, 1580 sizeof(ur->ur_msg)); 1581 offset += sizeof(ur->ur_msg); 1582 free(ur, M_PFSYNC); 1583 count++; 1584 } 1585 1586 bzero(subh, sizeof(*subh)); 1587 subh->action = PFSYNC_ACT_UPD_REQ; 1588 subh->count = htons(count); 1589 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count; 1590 } 1591 1592 /* has someone built a custom region for us to add? */ 1593 if (sc->sc_plus != NULL) { 1594 bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen); 1595 offset += sc->sc_pluslen; 1596 1597 sc->sc_plus = NULL; 1598 } 1599 1600 subh = (struct pfsync_subheader *)(m->m_data + offset); 1601 offset += sizeof(*subh); 1602 1603 bzero(subh, sizeof(*subh)); 1604 subh->action = PFSYNC_ACT_EOF; 1605 subh->count = htons(1); 1606 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++; 1607 1608 /* we're done, let's put it on the wire */ 1609 if (ifp->if_bpf) { 1610 m->m_data += sizeof(*ip); 1611 m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip); 1612 BPF_MTAP(ifp, m); 1613 m->m_data -= sizeof(*ip); 1614 m->m_len = m->m_pkthdr.len = sc->sc_len; 1615 } 1616 1617 if (sc->sc_sync_if == NULL) { 1618 sc->sc_len = PFSYNC_MINPKT; 1619 m_freem(m); 1620 return; 1621 } 1622 1623 sc->sc_ifp->if_opackets++; 1624 sc->sc_ifp->if_obytes += m->m_pkthdr.len; 1625 sc->sc_len = PFSYNC_MINPKT; 1626 1627 if (!_IF_QFULL(&sc->sc_ifp->if_snd)) 1628 _IF_ENQUEUE(&sc->sc_ifp->if_snd, m); 1629 else { 1630 m_freem(m); 1631 sc->sc_ifp->if_snd.ifq_drops++; 1632 } 1633 if (schedswi) 1634 swi_sched(V_pfsync_swi_cookie, 0); 1635 } 1636 1637 static void 1638 pfsync_insert_state(struct pf_state *st) 1639 { 1640 struct pfsync_softc *sc = V_pfsyncif; 1641 1642 if (st->state_flags & PFSTATE_NOSYNC) 1643 return; 1644 1645 if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || 1646 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) { 1647 st->state_flags |= PFSTATE_NOSYNC; 1648 return; 1649 } 1650 1651 KASSERT(st->sync_state == PFSYNC_S_NONE, 1652 ("%s: st->sync_state %u", __func__, st->sync_state)); 1653 1654 PFSYNC_LOCK(sc); 1655 if (sc->sc_len == PFSYNC_MINPKT) 1656 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif); 1657 1658 pfsync_q_ins(st, PFSYNC_S_INS); 1659 PFSYNC_UNLOCK(sc); 1660 1661 st->sync_updates = 0; 1662 } 1663 1664 static int 1665 pfsync_defer(struct pf_state *st, struct mbuf *m) 1666 { 1667 struct pfsync_softc *sc = V_pfsyncif; 1668 struct pfsync_deferral *pd; 1669 1670 if (m->m_flags & (M_BCAST|M_MCAST)) 1671 return (0); 1672 1673 PFSYNC_LOCK(sc); 1674 1675 if (sc == NULL || !(sc->sc_ifp->if_flags & IFF_DRV_RUNNING) || 1676 !(sc->sc_flags & PFSYNCF_DEFER)) { 1677 PFSYNC_UNLOCK(sc); 1678 return (0); 1679 } 1680 1681 if (sc->sc_deferred >= 128) 1682 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0); 1683 1684 pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT); 1685 if (pd == NULL) 1686 return (0); 1687 sc->sc_deferred++; 1688 1689 m->m_flags |= M_SKIP_FIREWALL; 1690 st->state_flags |= PFSTATE_ACK; 1691 1692 pd->pd_sc = sc; 1693 pd->pd_refs = 0; 1694 pd->pd_st = st; 1695 pf_ref_state(st); 1696 pd->pd_m = m; 1697 1698 TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry); 1699 callout_init_mtx(&pd->pd_tmo, &sc->sc_mtx, CALLOUT_RETURNUNLOCKED); 1700 callout_reset(&pd->pd_tmo, 10, pfsync_defer_tmo, pd); 1701 1702 pfsync_push(sc); 1703 1704 return (1); 1705 } 1706 1707 static void 1708 pfsync_undefer(struct pfsync_deferral *pd, int drop) 1709 { 1710 struct pfsync_softc *sc = pd->pd_sc; 1711 struct mbuf *m = pd->pd_m; 1712 struct pf_state *st = pd->pd_st; 1713 1714 PFSYNC_LOCK_ASSERT(sc); 1715 1716 TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry); 1717 sc->sc_deferred--; 1718 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 1719 free(pd, M_PFSYNC); 1720 pf_release_state(st); 1721 1722 if (drop) 1723 m_freem(m); 1724 else { 1725 _IF_ENQUEUE(&sc->sc_ifp->if_snd, m); 1726 pfsync_push(sc); 1727 } 1728 } 1729 1730 static void 1731 pfsync_defer_tmo(void *arg) 1732 { 1733 struct pfsync_deferral *pd = arg; 1734 struct pfsync_softc *sc = pd->pd_sc; 1735 struct mbuf *m = pd->pd_m; 1736 struct pf_state *st = pd->pd_st; 1737 1738 PFSYNC_LOCK_ASSERT(sc); 1739 1740 CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); 1741 1742 TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry); 1743 sc->sc_deferred--; 1744 pd->pd_st->state_flags &= ~PFSTATE_ACK; /* XXX: locking! */ 1745 if (pd->pd_refs == 0) 1746 free(pd, M_PFSYNC); 1747 PFSYNC_UNLOCK(sc); 1748 1749 ip_output(m, NULL, NULL, 0, NULL, NULL); 1750 1751 pf_release_state(st); 1752 1753 CURVNET_RESTORE(); 1754 } 1755 1756 static void 1757 pfsync_undefer_state(struct pf_state *st, int drop) 1758 { 1759 struct pfsync_softc *sc = V_pfsyncif; 1760 struct pfsync_deferral *pd; 1761 1762 PFSYNC_LOCK_ASSERT(sc); 1763 1764 TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) { 1765 if (pd->pd_st == st) { 1766 if (callout_stop(&pd->pd_tmo)) 1767 pfsync_undefer(pd, drop); 1768 return; 1769 } 1770 } 1771 1772 panic("%s: unable to find deferred state", __func__); 1773 } 1774 1775 static void 1776 pfsync_update_state(struct pf_state *st) 1777 { 1778 struct pfsync_softc *sc = V_pfsyncif; 1779 int sync = 0; 1780 1781 PF_STATE_LOCK_ASSERT(st); 1782 PFSYNC_LOCK(sc); 1783 1784 if (st->state_flags & PFSTATE_ACK) 1785 pfsync_undefer_state(st, 0); 1786 if (st->state_flags & PFSTATE_NOSYNC) { 1787 if (st->sync_state != PFSYNC_S_NONE) 1788 pfsync_q_del(st); 1789 PFSYNC_UNLOCK(sc); 1790 return; 1791 } 1792 1793 if (sc->sc_len == PFSYNC_MINPKT) 1794 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif); 1795 1796 switch (st->sync_state) { 1797 case PFSYNC_S_UPD_C: 1798 case PFSYNC_S_UPD: 1799 case PFSYNC_S_INS: 1800 /* we're already handling it */ 1801 1802 if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) { 1803 st->sync_updates++; 1804 if (st->sync_updates >= sc->sc_maxupdates) 1805 sync = 1; 1806 } 1807 break; 1808 1809 case PFSYNC_S_IACK: 1810 pfsync_q_del(st); 1811 case PFSYNC_S_NONE: 1812 pfsync_q_ins(st, PFSYNC_S_UPD_C); 1813 st->sync_updates = 0; 1814 break; 1815 1816 default: 1817 panic("%s: unexpected sync state %d", __func__, st->sync_state); 1818 } 1819 1820 if (sync || (time_uptime - st->pfsync_time) < 2) 1821 pfsync_push(sc); 1822 1823 PFSYNC_UNLOCK(sc); 1824 } 1825 1826 static void 1827 pfsync_request_update(u_int32_t creatorid, u_int64_t id) 1828 { 1829 struct pfsync_softc *sc = V_pfsyncif; 1830 struct pfsync_upd_req_item *item; 1831 size_t nlen = sizeof(struct pfsync_upd_req); 1832 1833 PFSYNC_LOCK_ASSERT(sc); 1834 1835 /* 1836 * This code does a bit to prevent multiple update requests for the 1837 * same state being generated. It searches current subheader queue, 1838 * but it doesn't lookup into queue of already packed datagrams. 1839 */ 1840 TAILQ_FOREACH(item, &sc->sc_upd_req_list, ur_entry) 1841 if (item->ur_msg.id == id && 1842 item->ur_msg.creatorid == creatorid) 1843 return; 1844 1845 item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT); 1846 if (item == NULL) 1847 return; /* XXX stats */ 1848 1849 item->ur_msg.id = id; 1850 item->ur_msg.creatorid = creatorid; 1851 1852 if (TAILQ_EMPTY(&sc->sc_upd_req_list)) 1853 nlen += sizeof(struct pfsync_subheader); 1854 1855 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 1856 pfsync_sendout(1); 1857 1858 nlen = sizeof(struct pfsync_subheader) + 1859 sizeof(struct pfsync_upd_req); 1860 } 1861 1862 TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry); 1863 sc->sc_len += nlen; 1864 } 1865 1866 static void 1867 pfsync_update_state_req(struct pf_state *st) 1868 { 1869 struct pfsync_softc *sc = V_pfsyncif; 1870 1871 PF_STATE_LOCK_ASSERT(st); 1872 PFSYNC_LOCK(sc); 1873 1874 if (st->state_flags & PFSTATE_NOSYNC) { 1875 if (st->sync_state != PFSYNC_S_NONE) 1876 pfsync_q_del(st); 1877 PFSYNC_UNLOCK(sc); 1878 return; 1879 } 1880 1881 switch (st->sync_state) { 1882 case PFSYNC_S_UPD_C: 1883 case PFSYNC_S_IACK: 1884 pfsync_q_del(st); 1885 case PFSYNC_S_NONE: 1886 pfsync_q_ins(st, PFSYNC_S_UPD); 1887 pfsync_push(sc); 1888 break; 1889 1890 case PFSYNC_S_INS: 1891 case PFSYNC_S_UPD: 1892 case PFSYNC_S_DEL: 1893 /* we're already handling it */ 1894 break; 1895 1896 default: 1897 panic("%s: unexpected sync state %d", __func__, st->sync_state); 1898 } 1899 1900 PFSYNC_UNLOCK(sc); 1901 } 1902 1903 static void 1904 pfsync_delete_state(struct pf_state *st) 1905 { 1906 struct pfsync_softc *sc = V_pfsyncif; 1907 1908 PFSYNC_LOCK(sc); 1909 if (st->state_flags & PFSTATE_ACK) 1910 pfsync_undefer_state(st, 1); 1911 if (st->state_flags & PFSTATE_NOSYNC) { 1912 if (st->sync_state != PFSYNC_S_NONE) 1913 pfsync_q_del(st); 1914 PFSYNC_UNLOCK(sc); 1915 return; 1916 } 1917 1918 if (sc->sc_len == PFSYNC_MINPKT) 1919 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, V_pfsyncif); 1920 1921 switch (st->sync_state) { 1922 case PFSYNC_S_INS: 1923 /* We never got to tell the world so just forget about it. */ 1924 pfsync_q_del(st); 1925 break; 1926 1927 case PFSYNC_S_UPD_C: 1928 case PFSYNC_S_UPD: 1929 case PFSYNC_S_IACK: 1930 pfsync_q_del(st); 1931 /* FALLTHROUGH to putting it on the del list */ 1932 1933 case PFSYNC_S_NONE: 1934 pfsync_q_ins(st, PFSYNC_S_DEL); 1935 break; 1936 1937 default: 1938 panic("%s: unexpected sync state %d", __func__, st->sync_state); 1939 } 1940 PFSYNC_UNLOCK(sc); 1941 } 1942 1943 static void 1944 pfsync_clear_states(u_int32_t creatorid, const char *ifname) 1945 { 1946 struct pfsync_softc *sc = V_pfsyncif; 1947 struct { 1948 struct pfsync_subheader subh; 1949 struct pfsync_clr clr; 1950 } __packed r; 1951 1952 bzero(&r, sizeof(r)); 1953 1954 r.subh.action = PFSYNC_ACT_CLR; 1955 r.subh.count = htons(1); 1956 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++; 1957 1958 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); 1959 r.clr.creatorid = creatorid; 1960 1961 PFSYNC_LOCK(sc); 1962 pfsync_send_plus(&r, sizeof(r)); 1963 PFSYNC_UNLOCK(sc); 1964 } 1965 1966 static void 1967 pfsync_q_ins(struct pf_state *st, int q) 1968 { 1969 struct pfsync_softc *sc = V_pfsyncif; 1970 size_t nlen = pfsync_qs[q].len; 1971 1972 PFSYNC_LOCK_ASSERT(sc); 1973 1974 KASSERT(st->sync_state == PFSYNC_S_NONE, 1975 ("%s: st->sync_state %u", __func__, st->sync_state)); 1976 KASSERT(sc->sc_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu", 1977 sc->sc_len)); 1978 1979 if (TAILQ_EMPTY(&sc->sc_qs[q])) 1980 nlen += sizeof(struct pfsync_subheader); 1981 1982 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 1983 pfsync_sendout(1); 1984 1985 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; 1986 } 1987 1988 sc->sc_len += nlen; 1989 TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list); 1990 st->sync_state = q; 1991 pf_ref_state(st); 1992 } 1993 1994 static void 1995 pfsync_q_del(struct pf_state *st) 1996 { 1997 struct pfsync_softc *sc = V_pfsyncif; 1998 int q = st->sync_state; 1999 2000 PFSYNC_LOCK_ASSERT(sc); 2001 KASSERT(st->sync_state != PFSYNC_S_NONE, 2002 ("%s: st->sync_state != PFSYNC_S_NONE", __func__)); 2003 2004 sc->sc_len -= pfsync_qs[q].len; 2005 TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list); 2006 st->sync_state = PFSYNC_S_NONE; 2007 pf_release_state(st); 2008 2009 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2010 sc->sc_len -= sizeof(struct pfsync_subheader); 2011 } 2012 2013 static void 2014 pfsync_bulk_start(void) 2015 { 2016 struct pfsync_softc *sc = V_pfsyncif; 2017 2018 if (V_pf_status.debug >= PF_DEBUG_MISC) 2019 printf("pfsync: received bulk update request\n"); 2020 2021 PFSYNC_BLOCK(sc); 2022 2023 sc->sc_ureq_received = time_uptime; 2024 sc->sc_bulk_hashid = 0; 2025 sc->sc_bulk_stateid = 0; 2026 pfsync_bulk_status(PFSYNC_BUS_START); 2027 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); 2028 PFSYNC_BUNLOCK(sc); 2029 } 2030 2031 static void 2032 pfsync_bulk_update(void *arg) 2033 { 2034 struct pfsync_softc *sc = arg; 2035 struct pf_state *s; 2036 int i, sent = 0; 2037 2038 PFSYNC_BLOCK_ASSERT(sc); 2039 CURVNET_SET(sc->sc_ifp->if_vnet); 2040 2041 /* 2042 * Start with last state from previous invocation. 2043 * It may had gone, in this case start from the 2044 * hash slot. 2045 */ 2046 s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid); 2047 2048 if (s != NULL) 2049 i = PF_IDHASH(s); 2050 else 2051 i = sc->sc_bulk_hashid; 2052 2053 for (; i <= V_pf_hashmask; i++) { 2054 struct pf_idhash *ih = &V_pf_idhash[i]; 2055 2056 if (s != NULL) 2057 PF_HASHROW_ASSERT(ih); 2058 else { 2059 PF_HASHROW_LOCK(ih); 2060 s = LIST_FIRST(&ih->states); 2061 } 2062 2063 for (; s; s = LIST_NEXT(s, entry)) { 2064 2065 if (sent > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) < 2066 sizeof(struct pfsync_state)) { 2067 /* We've filled a packet. */ 2068 sc->sc_bulk_hashid = i; 2069 sc->sc_bulk_stateid = s->id; 2070 sc->sc_bulk_creatorid = s->creatorid; 2071 PF_HASHROW_UNLOCK(ih); 2072 callout_reset(&sc->sc_bulk_tmo, 1, 2073 pfsync_bulk_update, sc); 2074 goto full; 2075 } 2076 2077 if (s->sync_state == PFSYNC_S_NONE && 2078 s->timeout < PFTM_MAX && 2079 s->pfsync_time <= sc->sc_ureq_received) { 2080 pfsync_update_state_req(s); 2081 sent++; 2082 } 2083 } 2084 PF_HASHROW_UNLOCK(ih); 2085 } 2086 2087 /* We're done. */ 2088 pfsync_bulk_status(PFSYNC_BUS_END); 2089 2090 full: 2091 CURVNET_RESTORE(); 2092 } 2093 2094 static void 2095 pfsync_bulk_status(u_int8_t status) 2096 { 2097 struct { 2098 struct pfsync_subheader subh; 2099 struct pfsync_bus bus; 2100 } __packed r; 2101 2102 struct pfsync_softc *sc = V_pfsyncif; 2103 2104 bzero(&r, sizeof(r)); 2105 2106 r.subh.action = PFSYNC_ACT_BUS; 2107 r.subh.count = htons(1); 2108 V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++; 2109 2110 r.bus.creatorid = V_pf_status.hostid; 2111 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); 2112 r.bus.status = status; 2113 2114 PFSYNC_LOCK(sc); 2115 pfsync_send_plus(&r, sizeof(r)); 2116 PFSYNC_UNLOCK(sc); 2117 } 2118 2119 static void 2120 pfsync_bulk_fail(void *arg) 2121 { 2122 struct pfsync_softc *sc = arg; 2123 2124 CURVNET_SET(sc->sc_ifp->if_vnet); 2125 2126 PFSYNC_BLOCK_ASSERT(sc); 2127 2128 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { 2129 /* Try again */ 2130 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 2131 pfsync_bulk_fail, V_pfsyncif); 2132 PFSYNC_LOCK(sc); 2133 pfsync_request_update(0, 0); 2134 PFSYNC_UNLOCK(sc); 2135 } else { 2136 /* Pretend like the transfer was ok. */ 2137 sc->sc_ureq_sent = 0; 2138 sc->sc_bulk_tries = 0; 2139 PFSYNC_LOCK(sc); 2140 if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p) 2141 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 2142 "pfsync bulk fail"); 2143 sc->sc_flags |= PFSYNCF_OK; 2144 PFSYNC_UNLOCK(sc); 2145 if (V_pf_status.debug >= PF_DEBUG_MISC) 2146 printf("pfsync: failed to receive bulk update\n"); 2147 } 2148 2149 CURVNET_RESTORE(); 2150 } 2151 2152 static void 2153 pfsync_send_plus(void *plus, size_t pluslen) 2154 { 2155 struct pfsync_softc *sc = V_pfsyncif; 2156 2157 PFSYNC_LOCK_ASSERT(sc); 2158 2159 if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) 2160 pfsync_sendout(1); 2161 2162 sc->sc_plus = plus; 2163 sc->sc_len += (sc->sc_pluslen = pluslen); 2164 2165 pfsync_sendout(1); 2166 } 2167 2168 static void 2169 pfsync_timeout(void *arg) 2170 { 2171 struct pfsync_softc *sc = arg; 2172 2173 CURVNET_SET(sc->sc_ifp->if_vnet); 2174 PFSYNC_LOCK(sc); 2175 pfsync_push(sc); 2176 PFSYNC_UNLOCK(sc); 2177 CURVNET_RESTORE(); 2178 } 2179 2180 static void 2181 pfsync_push(struct pfsync_softc *sc) 2182 { 2183 2184 PFSYNC_LOCK_ASSERT(sc); 2185 2186 sc->sc_flags |= PFSYNCF_PUSH; 2187 swi_sched(V_pfsync_swi_cookie, 0); 2188 } 2189 2190 static void 2191 pfsyncintr(void *arg) 2192 { 2193 struct pfsync_softc *sc = arg; 2194 struct mbuf *m, *n; 2195 2196 CURVNET_SET(sc->sc_ifp->if_vnet); 2197 2198 PFSYNC_LOCK(sc); 2199 if ((sc->sc_flags & PFSYNCF_PUSH) && sc->sc_len > PFSYNC_MINPKT) { 2200 pfsync_sendout(0); 2201 sc->sc_flags &= ~PFSYNCF_PUSH; 2202 } 2203 _IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m); 2204 PFSYNC_UNLOCK(sc); 2205 2206 for (; m != NULL; m = n) { 2207 2208 n = m->m_nextpkt; 2209 m->m_nextpkt = NULL; 2210 2211 /* 2212 * We distinguish between a deferral packet and our 2213 * own pfsync packet based on M_SKIP_FIREWALL 2214 * flag. This is XXX. 2215 */ 2216 if (m->m_flags & M_SKIP_FIREWALL) 2217 ip_output(m, NULL, NULL, 0, NULL, NULL); 2218 else if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, 2219 NULL) == 0) 2220 V_pfsyncstats.pfsyncs_opackets++; 2221 else 2222 V_pfsyncstats.pfsyncs_oerrors++; 2223 } 2224 CURVNET_RESTORE(); 2225 } 2226 2227 static int 2228 pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship) 2229 { 2230 struct ip_moptions *imo = &sc->sc_imo; 2231 int error; 2232 2233 if (!(ifp->if_flags & IFF_MULTICAST)) 2234 return (EADDRNOTAVAIL); 2235 2236 imo->imo_membership = (struct in_multi **)mship; 2237 imo->imo_max_memberships = IP_MIN_MEMBERSHIPS; 2238 imo->imo_multicast_vif = -1; 2239 2240 if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL, 2241 &imo->imo_membership[0])) != 0) { 2242 imo->imo_membership = NULL; 2243 return (error); 2244 } 2245 imo->imo_num_memberships++; 2246 imo->imo_multicast_ifp = ifp; 2247 imo->imo_multicast_ttl = PFSYNC_DFLTTL; 2248 imo->imo_multicast_loop = 0; 2249 2250 return (0); 2251 } 2252 2253 static void 2254 pfsync_multicast_cleanup(struct pfsync_softc *sc) 2255 { 2256 struct ip_moptions *imo = &sc->sc_imo; 2257 2258 in_leavegroup(imo->imo_membership[0], NULL); 2259 free(imo->imo_membership, M_PFSYNC); 2260 imo->imo_membership = NULL; 2261 imo->imo_multicast_ifp = NULL; 2262 } 2263 2264 #ifdef INET 2265 extern struct domain inetdomain; 2266 static struct protosw in_pfsync_protosw = { 2267 .pr_type = SOCK_RAW, 2268 .pr_domain = &inetdomain, 2269 .pr_protocol = IPPROTO_PFSYNC, 2270 .pr_flags = PR_ATOMIC|PR_ADDR, 2271 .pr_input = pfsync_input, 2272 .pr_output = (pr_output_t *)rip_output, 2273 .pr_ctloutput = rip_ctloutput, 2274 .pr_usrreqs = &rip_usrreqs 2275 }; 2276 #endif 2277 2278 static void 2279 pfsync_pointers_init() 2280 { 2281 2282 PF_RULES_WLOCK(); 2283 pfsync_state_import_ptr = pfsync_state_import; 2284 pfsync_insert_state_ptr = pfsync_insert_state; 2285 pfsync_update_state_ptr = pfsync_update_state; 2286 pfsync_delete_state_ptr = pfsync_delete_state; 2287 pfsync_clear_states_ptr = pfsync_clear_states; 2288 pfsync_defer_ptr = pfsync_defer; 2289 PF_RULES_WUNLOCK(); 2290 } 2291 2292 static void 2293 pfsync_pointers_uninit() 2294 { 2295 2296 PF_RULES_WLOCK(); 2297 pfsync_state_import_ptr = NULL; 2298 pfsync_insert_state_ptr = NULL; 2299 pfsync_update_state_ptr = NULL; 2300 pfsync_delete_state_ptr = NULL; 2301 pfsync_clear_states_ptr = NULL; 2302 pfsync_defer_ptr = NULL; 2303 PF_RULES_WUNLOCK(); 2304 } 2305 2306 static int 2307 pfsync_init() 2308 { 2309 VNET_ITERATOR_DECL(vnet_iter); 2310 int error = 0; 2311 2312 VNET_LIST_RLOCK(); 2313 VNET_FOREACH(vnet_iter) { 2314 CURVNET_SET(vnet_iter); 2315 V_pfsync_cloner = if_clone_simple(pfsyncname, 2316 pfsync_clone_create, pfsync_clone_destroy, 1); 2317 error = swi_add(NULL, pfsyncname, pfsyncintr, V_pfsyncif, 2318 SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie); 2319 CURVNET_RESTORE(); 2320 if (error) 2321 goto fail_locked; 2322 } 2323 VNET_LIST_RUNLOCK(); 2324 #ifdef INET 2325 error = pf_proto_register(PF_INET, &in_pfsync_protosw); 2326 if (error) 2327 goto fail; 2328 error = ipproto_register(IPPROTO_PFSYNC); 2329 if (error) { 2330 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW); 2331 goto fail; 2332 } 2333 #endif 2334 pfsync_pointers_init(); 2335 2336 return (0); 2337 2338 fail: 2339 VNET_LIST_RLOCK(); 2340 fail_locked: 2341 VNET_FOREACH(vnet_iter) { 2342 CURVNET_SET(vnet_iter); 2343 if (V_pfsync_swi_cookie) { 2344 swi_remove(V_pfsync_swi_cookie); 2345 if_clone_detach(V_pfsync_cloner); 2346 } 2347 CURVNET_RESTORE(); 2348 } 2349 VNET_LIST_RUNLOCK(); 2350 2351 return (error); 2352 } 2353 2354 static void 2355 pfsync_uninit() 2356 { 2357 VNET_ITERATOR_DECL(vnet_iter); 2358 2359 pfsync_pointers_uninit(); 2360 2361 ipproto_unregister(IPPROTO_PFSYNC); 2362 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW); 2363 VNET_LIST_RLOCK(); 2364 VNET_FOREACH(vnet_iter) { 2365 CURVNET_SET(vnet_iter); 2366 if_clone_detach(V_pfsync_cloner); 2367 swi_remove(V_pfsync_swi_cookie); 2368 CURVNET_RESTORE(); 2369 } 2370 VNET_LIST_RUNLOCK(); 2371 } 2372 2373 static int 2374 pfsync_modevent(module_t mod, int type, void *data) 2375 { 2376 int error = 0; 2377 2378 switch (type) { 2379 case MOD_LOAD: 2380 error = pfsync_init(); 2381 break; 2382 case MOD_QUIESCE: 2383 /* 2384 * Module should not be unloaded due to race conditions. 2385 */ 2386 error = EBUSY; 2387 break; 2388 case MOD_UNLOAD: 2389 pfsync_uninit(); 2390 break; 2391 default: 2392 error = EINVAL; 2393 break; 2394 } 2395 2396 return (error); 2397 } 2398 2399 static moduledata_t pfsync_mod = { 2400 pfsyncname, 2401 pfsync_modevent, 2402 0 2403 }; 2404 2405 #define PFSYNC_MODVER 1 2406 2407 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 2408 MODULE_VERSION(pfsync, PFSYNC_MODVER); 2409 MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); 2410