/* SPDX-License-Identifier: ISC
 *
 * Copyright (C) 2015-2021 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2019-2021 Matt Dunwoodie <ncon@noconroy.net>
 * Copyright (c) 2019-2020 Rubicon Communications, LLC (Netgate)
 * Copyright (c) 2021 Kyle Evans <kevans@FreeBSD.org>
 * Copyright (c) 2022 The FreeBSD Foundation
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/gtaskqueue.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <machine/_inttypes.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/radix.h>
#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>
#include <netinet/udp_var.h>
#include <netinet6/nd6.h>

#include "support.h"
#include "wg_noise.h"
#include "wg_cookie.h"
#include "version.h"
#include "if_wg.h"

#define DEFAULT_MTU		(ETHERMTU - 80)
#define MAX_MTU			(IF_MAXMTU - 80)

#define MAX_STAGED_PKT		128
#define MAX_QUEUED_PKT		1024
#define MAX_QUEUED_PKT_MASK	(MAX_QUEUED_PKT - 1)

#define MAX_QUEUED_HANDSHAKES	4096

#define REKEY_TIMEOUT_JITTER	334 /* 1/3 sec, round for arc4random_uniform */
#define MAX_TIMER_HANDSHAKES	(90 / REKEY_TIMEOUT)
#define NEW_HANDSHAKE_TIMEOUT	(REKEY_TIMEOUT + KEEPALIVE_TIMEOUT)
#define UNDERLOAD_TIMEOUT	1
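
/*
 * Informational note on the constants above (not used by the code): the 80
 * bytes reserved by DEFAULT_MTU and MAX_MTU correspond to the usual WireGuard
 * accounting for worst-case data encapsulation, i.e. a 40-byte IPv6 header,
 * an 8-byte UDP header, the 16-byte wg_pkt_data header defined below, and a
 * 16-byte authentication tag.  REKEY_TIMEOUT, KEEPALIVE_TIMEOUT and
 * REJECT_AFTER_TIME are not defined in this file; they come from the protocol
 * headers included above.  REKEY_TIMEOUT_JITTER is expressed in milliseconds
 * because it is added to millisecond timeouts before conversion with
 * MSEC_2_TICKS() in the timer event handlers further down.
 */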

#define DPRINTF(sc, ...) if (sc->sc_ifp->if_flags & IFF_DEBUG) if_printf(sc->sc_ifp, ##__VA_ARGS__)

/* First byte indicating packet type on the wire */
#define WG_PKT_INITIATION	htole32(1)
#define WG_PKT_RESPONSE		htole32(2)
#define WG_PKT_COOKIE		htole32(3)
#define WG_PKT_DATA		htole32(4)

#define WG_PKT_PADDING		16
#define WG_KEY_SIZE		32

struct wg_pkt_initiation {
	uint32_t		t;
	uint32_t		s_idx;
	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
	uint8_t			es[NOISE_PUBLIC_KEY_LEN + NOISE_AUTHTAG_LEN];
	uint8_t			ets[NOISE_TIMESTAMP_LEN + NOISE_AUTHTAG_LEN];
	struct cookie_macs	m;
};

struct wg_pkt_response {
	uint32_t		t;
	uint32_t		s_idx;
	uint32_t		r_idx;
	uint8_t			ue[NOISE_PUBLIC_KEY_LEN];
	uint8_t			en[0 + NOISE_AUTHTAG_LEN];
	struct cookie_macs	m;
};

struct wg_pkt_cookie {
	uint32_t		t;
	uint32_t		r_idx;
	uint8_t			nonce[COOKIE_NONCE_SIZE];
	uint8_t			ec[COOKIE_ENCRYPTED_SIZE];
};

struct wg_pkt_data {
	uint32_t		t;
	uint32_t		r_idx;
	uint64_t		nonce;
	uint8_t			buf[];
};

struct wg_endpoint {
	union {
		struct sockaddr		r_sa;
		struct sockaddr_in	r_sin;
#ifdef INET6
		struct sockaddr_in6	r_sin6;
#endif
	} e_remote;
	union {
		struct in_addr		l_in;
#ifdef INET6
		struct in6_pktinfo	l_pktinfo6;
#define l_in6 l_pktinfo6.ipi6_addr
#endif
	} e_local;
};

struct aip_addr {
	uint8_t		length;
	union {
		uint8_t		bytes[16];
		uint32_t	ip;
		uint32_t	ip6[4];
		struct in_addr	in;
		struct in6_addr	in6;
	};
};

struct wg_aip {
	struct radix_node	 a_nodes[2];
	LIST_ENTRY(wg_aip)	 a_entry;
	struct aip_addr		 a_addr;
	struct aip_addr		 a_mask;
	struct wg_peer		*a_peer;
	sa_family_t		 a_af;
};
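
/*
 * Illustrative sketch (not part of the driver): every WireGuard message
 * begins with a 32-bit little-endian type field, so a receive path can
 * classify a datagram by reading its first four bytes and checking the
 * length against the corresponding structure above.  The real validation
 * lives in wg_input(), which is declared below but not shown in this
 * excerpt; the fragment here only mirrors the definitions above.
 *
 *	uint32_t t;
 *
 *	if (m->m_pkthdr.len < sizeof(t))
 *		goto drop;
 *	m_copydata(m, 0, sizeof(t), (caddr_t)&t);
 *	if (t == WG_PKT_INITIATION &&
 *	    m->m_pkthdr.len == sizeof(struct wg_pkt_initiation))
 *		... handshake initiation ...
 *	else if (t == WG_PKT_DATA &&
 *	    m->m_pkthdr.len >= sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN)
 *		... transport data ...
 */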

struct wg_packet {
	STAILQ_ENTRY(wg_packet)	 p_serial;
	STAILQ_ENTRY(wg_packet)	 p_parallel;
	struct wg_endpoint	 p_endpoint;
	struct noise_keypair	*p_keypair;
	uint64_t		 p_nonce;
	struct mbuf		*p_mbuf;
	int			 p_mtu;
	sa_family_t		 p_af;
	enum wg_ring_state {
		WG_PACKET_UNCRYPTED,
		WG_PACKET_CRYPTED,
		WG_PACKET_DEAD,
	}			 p_state;
};

STAILQ_HEAD(wg_packet_list, wg_packet);

struct wg_queue {
	struct mtx		 q_mtx;
	struct wg_packet_list	 q_queue;
	size_t			 q_len;
};

struct wg_peer {
	TAILQ_ENTRY(wg_peer)	 p_entry;
	uint64_t		 p_id;
	struct wg_softc		*p_sc;

	struct noise_remote	*p_remote;
	struct cookie_maker	 p_cookie;

	struct rwlock		 p_endpoint_lock;
	struct wg_endpoint	 p_endpoint;

	struct wg_queue		 p_stage_queue;
	struct wg_queue		 p_encrypt_serial;
	struct wg_queue		 p_decrypt_serial;

	bool			 p_enabled;
	bool			 p_need_another_keepalive;
	uint16_t		 p_persistent_keepalive_interval;
	struct callout		 p_new_handshake;
	struct callout		 p_send_keepalive;
	struct callout		 p_retry_handshake;
	struct callout		 p_zero_key_material;
	struct callout		 p_persistent_keepalive;

	struct mtx		 p_handshake_mtx;
	struct timespec		 p_handshake_complete;	/* nanotime */
	int			 p_handshake_retries;

	struct grouptask	 p_send;
	struct grouptask	 p_recv;

	counter_u64_t		 p_tx_bytes;
	counter_u64_t		 p_rx_bytes;

	LIST_HEAD(, wg_aip)	 p_aips;
	size_t			 p_aips_num;
};

struct wg_socket {
	struct socket	*so_so4;
	struct socket	*so_so6;
	uint32_t	 so_user_cookie;
	int		 so_fibnum;
	in_port_t	 so_port;
};

struct wg_softc {
	LIST_ENTRY(wg_softc)	 sc_entry;
	struct ifnet		*sc_ifp;
	int			 sc_flags;

	struct ucred		*sc_ucred;
	struct wg_socket	 sc_socket;

	TAILQ_HEAD(,wg_peer)	 sc_peers;
	size_t			 sc_peers_num;

	struct noise_local	*sc_local;
	struct cookie_checker	 sc_cookie;

	struct radix_node_head	*sc_aip4;
	struct radix_node_head	*sc_aip6;

	struct grouptask	 sc_handshake;
	struct wg_queue		 sc_handshake_queue;

	struct grouptask	*sc_encrypt;
	struct grouptask	*sc_decrypt;
	struct wg_queue		 sc_encrypt_parallel;
	struct wg_queue		 sc_decrypt_parallel;
	u_int			 sc_encrypt_last_cpu;
	u_int			 sc_decrypt_last_cpu;

	struct sx		 sc_lock;
};

#define	WGF_DYING	0x0001

#define MAX_LOOPS	8
#define MTAG_WGLOOP	0x77676c70 /* wglp */
#ifndef ENOKEY
#define	ENOKEY	ENOTCAPABLE
#endif

#define GROUPTASK_DRAIN(gtask)			\
	gtaskqueue_drain((gtask)->gt_taskqueue, &(gtask)->gt_task)

#define BPF_MTAP2_AF(ifp, m, af) do { \
		uint32_t __bpf_tap_af = (af); \
		BPF_MTAP2(ifp, &__bpf_tap_af, sizeof(__bpf_tap_af), m); \
	} while (0)

static int clone_count;
static uma_zone_t wg_packet_zone;
static volatile unsigned long peer_counter = 0;
static const char wgname[] = "wg";
static unsigned wg_osd_jail_slot;

static struct sx wg_sx;
SX_SYSINIT(wg_sx, &wg_sx, "wg_sx");

static LIST_HEAD(, wg_softc) wg_list = LIST_HEAD_INITIALIZER(wg_list);

static TASKQGROUP_DEFINE(wg_tqg, mp_ncpus, 1);

MALLOC_DEFINE(M_WG, "WG", "wireguard");

VNET_DEFINE_STATIC(struct if_clone *, wg_cloner);

#define	V_wg_cloner	VNET(wg_cloner)
#define	WG_CAPS		IFCAP_LINKSTATE

struct wg_timespec64 {
	uint64_t	tv_sec;
	uint64_t	tv_nsec;
};

static int wg_socket_init(struct wg_softc *, in_port_t);
static int wg_socket_bind(struct socket **, struct socket **, in_port_t *);
static void wg_socket_set(struct wg_softc *, struct socket *, struct socket *);
static void wg_socket_uninit(struct wg_softc *);
static int wg_socket_set_sockopt(struct socket *, struct socket *, int, void *, size_t);
static int wg_socket_set_cookie(struct wg_softc *, uint32_t);
static int wg_socket_set_fibnum(struct wg_softc *, int);
static int wg_send(struct wg_softc *, struct wg_endpoint *, struct mbuf *);
static void wg_timers_enable(struct wg_peer *);
static void wg_timers_disable(struct wg_peer *);
static void wg_timers_set_persistent_keepalive(struct wg_peer *, uint16_t);
static void wg_timers_get_last_handshake(struct wg_peer *, struct wg_timespec64 *);
static void wg_timers_event_data_sent(struct wg_peer *);
static void wg_timers_event_data_received(struct wg_peer *);
static void wg_timers_event_any_authenticated_packet_sent(struct wg_peer *);
static void wg_timers_event_any_authenticated_packet_received(struct wg_peer *);
static void wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *);
static void wg_timers_event_handshake_initiated(struct wg_peer *);
static void wg_timers_event_handshake_complete(struct wg_peer *);
static void wg_timers_event_session_derived(struct wg_peer *);
static void wg_timers_event_want_initiation(struct wg_peer *);
static void wg_timers_run_send_initiation(struct wg_peer *, bool);
static void wg_timers_run_retry_handshake(void *);
static void wg_timers_run_send_keepalive(void *);
static void wg_timers_run_new_handshake(void *);
static void wg_timers_run_zero_key_material(void *);
static void wg_timers_run_persistent_keepalive(void *);
static int wg_aip_add(struct wg_softc *, struct wg_peer *, sa_family_t, const void *, uint8_t);
static struct wg_peer *wg_aip_lookup(struct wg_softc *, sa_family_t, void *);
static void wg_aip_remove_all(struct wg_softc *, struct wg_peer *);
static struct wg_peer *wg_peer_alloc(struct wg_softc *, const uint8_t [WG_KEY_SIZE]);
static void wg_peer_free_deferred(struct noise_remote *);
static void wg_peer_destroy(struct wg_peer *);
static void wg_peer_destroy_all(struct wg_softc *);
static void wg_peer_send_buf(struct wg_peer *, uint8_t *, size_t);
static void wg_send_initiation(struct wg_peer *);
static void wg_send_response(struct wg_peer *);
static void wg_send_cookie(struct wg_softc *, struct cookie_macs *, uint32_t, struct wg_endpoint *);
static void wg_peer_set_endpoint(struct wg_peer *, struct wg_endpoint *);
static void wg_peer_clear_src(struct wg_peer *);
static void wg_peer_get_endpoint(struct wg_peer *, struct wg_endpoint *);
static void wg_send_buf(struct wg_softc *, struct wg_endpoint *, uint8_t *, size_t);
static void wg_send_keepalive(struct wg_peer *);
static void wg_handshake(struct wg_softc *, struct wg_packet *);
static void wg_encrypt(struct wg_softc *, struct wg_packet *);
static void wg_decrypt(struct wg_softc *, struct wg_packet *);
static void wg_softc_handshake_receive(struct wg_softc *);
static void wg_softc_decrypt(struct wg_softc *);
static void wg_softc_encrypt(struct wg_softc *);
static void wg_encrypt_dispatch(struct wg_softc *);
static void wg_decrypt_dispatch(struct wg_softc *);
static void wg_deliver_out(struct wg_peer *);
static void wg_deliver_in(struct wg_peer *);
static struct wg_packet *wg_packet_alloc(struct mbuf *);
static void wg_packet_free(struct wg_packet *);
static void wg_queue_init(struct wg_queue *, const char *);
static void wg_queue_deinit(struct wg_queue *);
static size_t wg_queue_len(struct wg_queue *);
static int wg_queue_enqueue_handshake(struct wg_queue *, struct wg_packet *);
static struct wg_packet *wg_queue_dequeue_handshake(struct wg_queue *);
static void wg_queue_push_staged(struct wg_queue *, struct wg_packet *);
static void wg_queue_enlist_staged(struct wg_queue *, struct wg_packet_list *);
static void wg_queue_delist_staged(struct wg_queue *, struct wg_packet_list *);
static void wg_queue_purge(struct wg_queue *);
static int wg_queue_both(struct wg_queue *, struct wg_queue *, struct wg_packet *);
static struct wg_packet *wg_queue_dequeue_serial(struct wg_queue *);
static struct wg_packet *wg_queue_dequeue_parallel(struct wg_queue *);
static bool wg_input(struct mbuf *, int, struct inpcb *, const struct sockaddr *, void *);
static void wg_peer_send_staged(struct wg_peer *);
static int wg_clone_create(struct if_clone *, int, caddr_t);
static void wg_qflush(struct ifnet *);
static inline int determine_af_and_pullup(struct mbuf **m, sa_family_t *af);
static int wg_xmit(struct ifnet *, struct mbuf *, sa_family_t, uint32_t);
static int wg_transmit(struct ifnet *, struct mbuf *);
static int wg_output(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *);
static void wg_clone_destroy(struct ifnet *);
static bool wgc_privileged(struct wg_softc *);
static int wgc_get(struct wg_softc *, struct wg_data_io *);
static int wgc_set(struct wg_softc *, struct wg_data_io *);
static int wg_up(struct wg_softc *);
static void wg_down(struct wg_softc *);
static void wg_reassign(struct ifnet *, struct vnet *, char *unused);
static void wg_init(void *);
static int wg_ioctl(struct ifnet *, u_long, caddr_t);
static void vnet_wg_init(const void *);
static void vnet_wg_uninit(const void *);
static int wg_module_init(void);
static void wg_module_deinit(void);

/* TODO Peer */
static struct wg_peer *
wg_peer_alloc(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE])
{
	struct wg_peer *peer;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	peer = malloc(sizeof(*peer), M_WG, M_WAITOK | M_ZERO);
	peer->p_remote = noise_remote_alloc(sc->sc_local, peer, pub_key);
	peer->p_tx_bytes = counter_u64_alloc(M_WAITOK);
	peer->p_rx_bytes = counter_u64_alloc(M_WAITOK);
	peer->p_id = peer_counter++;
	peer->p_sc = sc;

	cookie_maker_init(&peer->p_cookie, pub_key);

	rw_init(&peer->p_endpoint_lock, "wg_peer_endpoint");

	wg_queue_init(&peer->p_stage_queue, "stageq");
	wg_queue_init(&peer->p_encrypt_serial, "txq");
	wg_queue_init(&peer->p_decrypt_serial, "rxq");

	peer->p_enabled = false;
	peer->p_need_another_keepalive = false;
	peer->p_persistent_keepalive_interval = 0;
	callout_init(&peer->p_new_handshake, true);
	callout_init(&peer->p_send_keepalive, true);
	callout_init(&peer->p_retry_handshake, true);
	callout_init(&peer->p_persistent_keepalive, true);
	callout_init(&peer->p_zero_key_material, true);

	mtx_init(&peer->p_handshake_mtx, "peer handshake", NULL, MTX_DEF);
	bzero(&peer->p_handshake_complete, sizeof(peer->p_handshake_complete));
	peer->p_handshake_retries = 0;

	GROUPTASK_INIT(&peer->p_send, 0, (gtask_fn_t *)wg_deliver_out, peer);
	taskqgroup_attach(qgroup_wg_tqg, &peer->p_send, peer, NULL, NULL, "wg send");
	GROUPTASK_INIT(&peer->p_recv, 0, (gtask_fn_t *)wg_deliver_in, peer);
	taskqgroup_attach(qgroup_wg_tqg, &peer->p_recv, peer, NULL, NULL, "wg recv");

	LIST_INIT(&peer->p_aips);
	peer->p_aips_num = 0;

	return (peer);
}

static void
wg_peer_free_deferred(struct noise_remote *r)
{
	struct wg_peer *peer = noise_remote_arg(r);

	/* While there are no references remaining, we may still have
	 * p_{send,recv} executing (think empty queue, but wg_deliver_{in,out}
	 * needs to check the queue). We should wait for them and then free. */
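	/*
	 * (Informational) GROUPTASK_DRAIN() maps to gtaskqueue_drain(), which
	 * blocks until any queued or currently running instance of the task
	 * has finished; only once both tasks have drained are they detached
	 * from the taskqgroup, so they can no longer run against freed peer
	 * state.
	 */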
	GROUPTASK_DRAIN(&peer->p_recv);
	GROUPTASK_DRAIN(&peer->p_send);
	taskqgroup_detach(qgroup_wg_tqg, &peer->p_recv);
	taskqgroup_detach(qgroup_wg_tqg, &peer->p_send);

	wg_queue_deinit(&peer->p_decrypt_serial);
	wg_queue_deinit(&peer->p_encrypt_serial);
	wg_queue_deinit(&peer->p_stage_queue);

	counter_u64_free(peer->p_tx_bytes);
	counter_u64_free(peer->p_rx_bytes);
	rw_destroy(&peer->p_endpoint_lock);
	mtx_destroy(&peer->p_handshake_mtx);

	cookie_maker_free(&peer->p_cookie);

	free(peer, M_WG);
}

static void
wg_peer_destroy(struct wg_peer *peer)
{
	struct wg_softc *sc = peer->p_sc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	/* Disable remote and timers. This will prevent any new handshakes
	 * from occurring. */
	noise_remote_disable(peer->p_remote);
	wg_timers_disable(peer);

	/* Now we can remove all allowed IPs so no more packets will be routed
	 * to the peer. */
	wg_aip_remove_all(sc, peer);

	/* Remove peer from the interface, then free. Some references may still
	 * exist to p_remote, so noise_remote_free will wait until they're all
	 * put to call wg_peer_free_deferred. */
	sc->sc_peers_num--;
	TAILQ_REMOVE(&sc->sc_peers, peer, p_entry);
	DPRINTF(sc, "Peer %" PRIu64 " destroyed\n", peer->p_id);
	noise_remote_free(peer->p_remote, wg_peer_free_deferred);
}

static void
wg_peer_destroy_all(struct wg_softc *sc)
{
	struct wg_peer *peer, *tpeer;
	TAILQ_FOREACH_SAFE(peer, &sc->sc_peers, p_entry, tpeer)
		wg_peer_destroy(peer);
}

static void
wg_peer_set_endpoint(struct wg_peer *peer, struct wg_endpoint *e)
{
	MPASS(e->e_remote.r_sa.sa_family != 0);
	if (memcmp(e, &peer->p_endpoint, sizeof(*e)) == 0)
		return;

	rw_wlock(&peer->p_endpoint_lock);
	peer->p_endpoint = *e;
	rw_wunlock(&peer->p_endpoint_lock);
}

static void
wg_peer_clear_src(struct wg_peer *peer)
{
	rw_wlock(&peer->p_endpoint_lock);
	bzero(&peer->p_endpoint.e_local, sizeof(peer->p_endpoint.e_local));
	rw_wunlock(&peer->p_endpoint_lock);
}

static void
wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *e)
{
	rw_rlock(&peer->p_endpoint_lock);
	*e = peer->p_endpoint;
	rw_runlock(&peer->p_endpoint_lock);
}

/* Allowed IP */
static int
wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af, const void *addr, uint8_t cidr)
{
	struct radix_node_head	*root;
	struct radix_node	*node;
	struct wg_aip		*aip;
	int			 ret = 0;

	aip = malloc(sizeof(*aip), M_WG, M_WAITOK | M_ZERO);
	aip->a_peer = peer;
	aip->a_af = af;

	switch (af) {
#ifdef INET
	case AF_INET:
		if (cidr > 32) cidr = 32;
		root = sc->sc_aip4;
		aip->a_addr.in = *(const struct in_addr *)addr;
		aip->a_mask.ip = htonl(~((1LL << (32 - cidr)) - 1) & 0xffffffff);
		aip->a_addr.ip &= aip->a_mask.ip;
		aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in) + sizeof(struct in_addr);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cidr > 128) cidr = 128;
		root = sc->sc_aip6;
		aip->a_addr.in6 = *(const struct in6_addr *)addr;
		in6_prefixlen2mask(&aip->a_mask.in6, cidr);
		for (int i = 0; i < 4; i++)
			aip->a_addr.ip6[i] &= aip->a_mask.ip6[i];
		aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr);
		break;
#endif
	default:
		free(aip, M_WG);
		return (EAFNOSUPPORT);
	}

	RADIX_NODE_HEAD_LOCK(root);
	node = root->rnh_addaddr(&aip->a_addr, &aip->a_mask, &root->rh, aip->a_nodes);
	if (node == aip->a_nodes) {
		LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry);
		peer->p_aips_num++;
	} else if (!node)
		node = root->rnh_lookup(&aip->a_addr, &aip->a_mask, &root->rh);
	if (!node) {
		RADIX_NODE_HEAD_UNLOCK(root);
		free(aip, M_WG);
		return (ENOMEM);
	} else if (node != aip->a_nodes) {
		free(aip, M_WG);
		aip = (struct wg_aip *)node;
		if (aip->a_peer != peer) {
			LIST_REMOVE(aip, a_entry);
			aip->a_peer->p_aips_num--;
			aip->a_peer = peer;
			LIST_INSERT_HEAD(&peer->p_aips, aip, a_entry);
			aip->a_peer->p_aips_num++;
		}
	}
	RADIX_NODE_HEAD_UNLOCK(root);
	return (ret);
}

static struct wg_peer *
wg_aip_lookup(struct wg_softc *sc, sa_family_t af, void *a)
{
	struct radix_node_head	*root;
	struct radix_node	*node;
	struct wg_peer		*peer;
	struct aip_addr		 addr;
	RADIX_NODE_HEAD_RLOCK_TRACKER;

	switch (af) {
	case AF_INET:
		root = sc->sc_aip4;
		memcpy(&addr.in, a, sizeof(addr.in));
		addr.length = offsetof(struct aip_addr, in) + sizeof(struct in_addr);
		break;
	case AF_INET6:
		root = sc->sc_aip6;
		memcpy(&addr.in6, a, sizeof(addr.in6));
		addr.length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr);
		break;
	default:
		return NULL;
	}

	RADIX_NODE_HEAD_RLOCK(root);
	node = root->rnh_matchaddr(&addr, &root->rh);
	if (node != NULL) {
		peer = ((struct wg_aip *)node)->a_peer;
		noise_remote_ref(peer->p_remote);
	} else {
		peer = NULL;
	}
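	/*
	 * (Informational) When a match is found, a reference on the peer's
	 * noise_remote is taken above so the peer remains valid after the
	 * radix lock is released; the caller is expected to drop that
	 * reference once it is finished with the peer.
	 */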
	RADIX_NODE_HEAD_RUNLOCK(root);

	return (peer);
}

static void
wg_aip_remove_all(struct wg_softc *sc, struct wg_peer *peer)
{
	struct wg_aip		*aip, *taip;

	RADIX_NODE_HEAD_LOCK(sc->sc_aip4);
	LIST_FOREACH_SAFE(aip, &peer->p_aips, a_entry, taip) {
		if (aip->a_af == AF_INET) {
			if (sc->sc_aip4->rnh_deladdr(&aip->a_addr, &aip->a_mask, &sc->sc_aip4->rh) == NULL)
				panic("failed to delete aip %p", aip);
			LIST_REMOVE(aip, a_entry);
			peer->p_aips_num--;
			free(aip, M_WG);
		}
	}
	RADIX_NODE_HEAD_UNLOCK(sc->sc_aip4);

	RADIX_NODE_HEAD_LOCK(sc->sc_aip6);
	LIST_FOREACH_SAFE(aip, &peer->p_aips, a_entry, taip) {
		if (aip->a_af == AF_INET6) {
			if (sc->sc_aip6->rnh_deladdr(&aip->a_addr, &aip->a_mask, &sc->sc_aip6->rh) == NULL)
				panic("failed to delete aip %p", aip);
			LIST_REMOVE(aip, a_entry);
			peer->p_aips_num--;
			free(aip, M_WG);
		}
	}
	RADIX_NODE_HEAD_UNLOCK(sc->sc_aip6);

	if (!LIST_EMPTY(&peer->p_aips) || peer->p_aips_num != 0)
		panic("wg_aip_remove_all could not delete all %p", peer);
}

static int
wg_socket_init(struct wg_softc *sc, in_port_t port)
{
	struct ucred *cred = sc->sc_ucred;
	struct socket *so4 = NULL, *so6 = NULL;
	int rc;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (!cred)
		return (EBUSY);

	/*
	 * For socket creation, we use the creds of the thread that created the
	 * tunnel rather than the current thread to maintain the semantics that
	 * WireGuard has on Linux with network namespaces -- that the sockets
	 * are created in their home vnet so that they can be configured and
	 * functionally attached to a foreign vnet as the jail's only interface
	 * to the network.
	 */
#ifdef INET
	rc = socreate(AF_INET, &so4, SOCK_DGRAM, IPPROTO_UDP, cred, curthread);
	if (rc)
		goto out;

	rc = udp_set_kernel_tunneling(so4, wg_input, NULL, sc);
	/*
	 * udp_set_kernel_tunneling can only fail if there is already a tunneling function set.
	 * This should never happen with a new socket.
	 */
	MPASS(rc == 0);
#endif

#ifdef INET6
	rc = socreate(AF_INET6, &so6, SOCK_DGRAM, IPPROTO_UDP, cred, curthread);
	if (rc)
		goto out;
	rc = udp_set_kernel_tunneling(so6, wg_input, NULL, sc);
	MPASS(rc == 0);
#endif

	if (sc->sc_socket.so_user_cookie) {
		rc = wg_socket_set_sockopt(so4, so6, SO_USER_COOKIE, &sc->sc_socket.so_user_cookie, sizeof(sc->sc_socket.so_user_cookie));
		if (rc)
			goto out;
	}
	rc = wg_socket_set_sockopt(so4, so6, SO_SETFIB, &sc->sc_socket.so_fibnum, sizeof(sc->sc_socket.so_fibnum));
	if (rc)
		goto out;

	rc = wg_socket_bind(&so4, &so6, &port);
	if (!rc) {
		sc->sc_socket.so_port = port;
		wg_socket_set(sc, so4, so6);
	}
out:
	if (rc) {
		if (so4 != NULL)
			soclose(so4);
		if (so6 != NULL)
			soclose(so6);
	}
	return (rc);
}

static int wg_socket_set_sockopt(struct socket *so4, struct socket *so6, int name, void *val, size_t len)
{
	int ret4 = 0, ret6 = 0;
	struct sockopt sopt = {
		.sopt_dir = SOPT_SET,
		.sopt_level = SOL_SOCKET,
		.sopt_name = name,
		.sopt_val = val,
		.sopt_valsize = len
	};

	if (so4)
		ret4 = sosetopt(so4, &sopt);
	if (so6)
		ret6 = sosetopt(so6, &sopt);
	return (ret4 ?: ret6);
}

static int wg_socket_set_cookie(struct wg_softc *sc, uint32_t user_cookie)
{
	struct wg_socket *so = &sc->sc_socket;
	int ret;

	sx_assert(&sc->sc_lock, SX_XLOCKED);
	ret = wg_socket_set_sockopt(so->so_so4, so->so_so6, SO_USER_COOKIE, &user_cookie, sizeof(user_cookie));
	if (!ret)
		so->so_user_cookie = user_cookie;
	return (ret);
}

static int wg_socket_set_fibnum(struct wg_softc *sc, int fibnum)
{
	struct wg_socket *so = &sc->sc_socket;
	int ret;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	ret = wg_socket_set_sockopt(so->so_so4, so->so_so6, SO_SETFIB, &fibnum, sizeof(fibnum));
	if (!ret)
		so->so_fibnum = fibnum;
	return (ret);
}

static void
wg_socket_uninit(struct wg_softc *sc)
{
	wg_socket_set(sc, NULL, NULL);
}

static void
wg_socket_set(struct wg_softc *sc, struct socket *new_so4, struct socket *new_so6)
{
	struct wg_socket *so = &sc->sc_socket;
	struct socket *so4, *so6;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	so4 = ck_pr_load_ptr(&so->so_so4);
	so6 = ck_pr_load_ptr(&so->so_so6);
	ck_pr_store_ptr(&so->so_so4, new_so4);
	ck_pr_store_ptr(&so->so_so6, new_so6);

	if (!so4 && !so6)
		return;
	NET_EPOCH_WAIT();
	if (so4)
		soclose(so4);
	if (so6)
		soclose(so6);
}

static int
wg_socket_bind(struct socket **in_so4, struct socket **in_so6, in_port_t *requested_port)
{
	struct socket *so4 = *in_so4, *so6 = *in_so6;
	int ret4 = 0, ret6 = 0;
	in_port_t port = *requested_port;
	struct sockaddr_in sin = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
		.sin_port = htons(port)
	};
	struct sockaddr_in6 sin6 = {
		.sin6_len = sizeof(struct sockaddr_in6),
		.sin6_family = AF_INET6,
		.sin6_port = htons(port)
	};

	if (so4) {
		ret4 = sobind(so4, (struct sockaddr *)&sin, curthread);
		if (ret4 && ret4 != EADDRNOTAVAIL)
			return (ret4);
		if (!ret4 && !sin.sin_port) {
			struct sockaddr_in *bound_sin;
			int ret = so4->so_proto->pr_sockaddr(so4,
			    (struct sockaddr **)&bound_sin);
			if (ret)
				return (ret);
			port = ntohs(bound_sin->sin_port);
			sin6.sin6_port = bound_sin->sin_port;
			free(bound_sin, M_SONAME);
		}
	}

	if (so6) {
		ret6 = sobind(so6, (struct sockaddr *)&sin6, curthread);
		if (ret6 && ret6 != EADDRNOTAVAIL)
			return (ret6);
		if (!ret6 && !sin6.sin6_port) {
			struct sockaddr_in6 *bound_sin6;
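			/*
			 * (Informational) As in the IPv4 case above, binding
			 * to port 0 lets the stack choose a port;
			 * pr_sockaddr() reports the address actually bound in
			 * a sockaddr allocated with M_SONAME, freed below.
			 */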
			int ret = so6->so_proto->pr_sockaddr(so6,
			    (struct sockaddr **)&bound_sin6);
			if (ret)
				return (ret);
			port = ntohs(bound_sin6->sin6_port);
			free(bound_sin6, M_SONAME);
		}
	}

	if (ret4 && ret6)
		return (ret4);
	*requested_port = port;
	if (ret4 && !ret6 && so4) {
		soclose(so4);
		*in_so4 = NULL;
	} else if (ret6 && !ret4 && so6) {
		soclose(so6);
		*in_so6 = NULL;
	}
	return (0);
}

static int
wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
{
	struct epoch_tracker et;
	struct sockaddr *sa;
	struct wg_socket *so = &sc->sc_socket;
	struct socket *so4, *so6;
	struct mbuf *control = NULL;
	int ret = 0;
	size_t len = m->m_pkthdr.len;

	/* Get local control address before locking */
	if (e->e_remote.r_sa.sa_family == AF_INET) {
		if (e->e_local.l_in.s_addr != INADDR_ANY)
			control = sbcreatecontrol((caddr_t)&e->e_local.l_in,
			    sizeof(struct in_addr), IP_SENDSRCADDR,
			    IPPROTO_IP, M_NOWAIT);
#ifdef INET6
	} else if (e->e_remote.r_sa.sa_family == AF_INET6) {
		if (!IN6_IS_ADDR_UNSPECIFIED(&e->e_local.l_in6))
			control = sbcreatecontrol((caddr_t)&e->e_local.l_pktinfo6,
			    sizeof(struct in6_pktinfo), IPV6_PKTINFO,
			    IPPROTO_IPV6, M_NOWAIT);
#endif
	} else {
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	/* Get remote address */
	sa = &e->e_remote.r_sa;

	NET_EPOCH_ENTER(et);
	so4 = ck_pr_load_ptr(&so->so_so4);
	so6 = ck_pr_load_ptr(&so->so_so6);
	if (e->e_remote.r_sa.sa_family == AF_INET && so4 != NULL)
		ret = sosend(so4, sa, NULL, m, control, 0, curthread);
	else if (e->e_remote.r_sa.sa_family == AF_INET6 && so6 != NULL)
		ret = sosend(so6, sa, NULL, m, control, 0, curthread);
	else {
		ret = ENOTCONN;
		m_freem(control);
		m_freem(m);
	}
	NET_EPOCH_EXIT(et);
	if (ret == 0) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
	}
	return (ret);
}

static void
wg_send_buf(struct wg_softc *sc, struct wg_endpoint *e, uint8_t *buf, size_t len)
{
	struct mbuf	*m;
	int		 ret = 0;
	bool		 retried = false;

retry:
	m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (!m) {
		ret = ENOMEM;
		goto out;
	}
	m_copyback(m, 0, len, buf);

	if (ret == 0) {
		ret = wg_send(sc, e, m);
		/* Retry if we couldn't bind to e->e_local */
		if (ret == EADDRNOTAVAIL && !retried) {
			bzero(&e->e_local, sizeof(e->e_local));
			retried = true;
			goto retry;
		}
	} else {
		ret = wg_send(sc, e, m);
	}
out:
	if (ret)
		DPRINTF(sc, "Unable to send packet: %d\n", ret);
}

/* Timers */
static void
wg_timers_enable(struct wg_peer *peer)
{
	ck_pr_store_bool(&peer->p_enabled, true);
	wg_timers_run_persistent_keepalive(peer);
}

static void
wg_timers_disable(struct wg_peer *peer)
{
	/* By setting p_enabled = false, then calling NET_EPOCH_WAIT, we can be
	 * sure no new handshakes are created after the wait. This is because
	 * all callout_resets (scheduling the callout) are guarded by
	 * p_enabled. We can be sure all sections that read p_enabled and then
	 * optionally call callout_reset are finished as they are surrounded by
	 * NET_EPOCH_{ENTER,EXIT}.
	 *
	 * However, as new callouts may be scheduled during NET_EPOCH_WAIT (but
	 * not after), we stop all callouts leaving no callouts active.
	 *
	 * We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the
	 * performance impact is acceptable for the time being. */
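	/*
	 * (Informational) The reader side of this scheme is visible in the
	 * wg_timers_event_*() functions below, which all follow the pattern:
	 *
	 *	NET_EPOCH_ENTER(et);
	 *	if (ck_pr_load_bool(&peer->p_enabled))
	 *		callout_reset(...);
	 *	NET_EPOCH_EXIT(et);
	 *
	 * so once p_enabled is false and NET_EPOCH_WAIT() has returned, no
	 * further callout_reset() can be issued on this peer's callouts.
	 */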
	ck_pr_store_bool(&peer->p_enabled, false);
	NET_EPOCH_WAIT();
	ck_pr_store_bool(&peer->p_need_another_keepalive, false);

	callout_stop(&peer->p_new_handshake);
	callout_stop(&peer->p_send_keepalive);
	callout_stop(&peer->p_retry_handshake);
	callout_stop(&peer->p_persistent_keepalive);
	callout_stop(&peer->p_zero_key_material);
}

static void
wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval)
{
	struct epoch_tracker et;
	if (interval != peer->p_persistent_keepalive_interval) {
		ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval);
		NET_EPOCH_ENTER(et);
		if (ck_pr_load_bool(&peer->p_enabled))
			wg_timers_run_persistent_keepalive(peer);
		NET_EPOCH_EXIT(et);
	}
}

static void
wg_timers_get_last_handshake(struct wg_peer *peer, struct wg_timespec64 *time)
{
	mtx_lock(&peer->p_handshake_mtx);
	time->tv_sec = peer->p_handshake_complete.tv_sec;
	time->tv_nsec = peer->p_handshake_complete.tv_nsec;
	mtx_unlock(&peer->p_handshake_mtx);
}

static void
wg_timers_event_data_sent(struct wg_peer *peer)
{
	struct epoch_tracker et;
	NET_EPOCH_ENTER(et);
	if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake))
		callout_reset(&peer->p_new_handshake, MSEC_2_TICKS(
		    NEW_HANDSHAKE_TIMEOUT * 1000 +
		    arc4random_uniform(REKEY_TIMEOUT_JITTER)),
		    wg_timers_run_new_handshake, peer);
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_data_received(struct wg_peer *peer)
{
	struct epoch_tracker et;
	NET_EPOCH_ENTER(et);
	if (ck_pr_load_bool(&peer->p_enabled)) {
		if (!callout_pending(&peer->p_send_keepalive))
			callout_reset(&peer->p_send_keepalive,
			    MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
			    wg_timers_run_send_keepalive, peer);
		else
			ck_pr_store_bool(&peer->p_need_another_keepalive, true);
	}
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_any_authenticated_packet_sent(struct wg_peer *peer)
{
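	/*
	 * (Informational) Any authenticated packet we send already serves as
	 * proof of liveness, so a keepalive scheduled for this peer is no
	 * longer needed.
	 */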
	callout_stop(&peer->p_send_keepalive);
}

static void
wg_timers_event_any_authenticated_packet_received(struct wg_peer *peer)
{
	callout_stop(&peer->p_new_handshake);
}

static void
wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer)
{
	struct epoch_tracker et;
	uint16_t interval;
	NET_EPOCH_ENTER(et);
	interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval);
	if (ck_pr_load_bool(&peer->p_enabled) && interval > 0)
		callout_reset(&peer->p_persistent_keepalive,
		    MSEC_2_TICKS(interval * 1000),
		    wg_timers_run_persistent_keepalive, peer);
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_handshake_initiated(struct wg_peer *peer)
{
	struct epoch_tracker et;
	NET_EPOCH_ENTER(et);
	if (ck_pr_load_bool(&peer->p_enabled))
		callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS(
		    REKEY_TIMEOUT * 1000 +
		    arc4random_uniform(REKEY_TIMEOUT_JITTER)),
		    wg_timers_run_retry_handshake, peer);
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_handshake_complete(struct wg_peer *peer)
{
	struct epoch_tracker et;
	NET_EPOCH_ENTER(et);
	if (ck_pr_load_bool(&peer->p_enabled)) {
		mtx_lock(&peer->p_handshake_mtx);
		callout_stop(&peer->p_retry_handshake);
		peer->p_handshake_retries = 0;
		getnanotime(&peer->p_handshake_complete);
		mtx_unlock(&peer->p_handshake_mtx);
		wg_timers_run_send_keepalive(peer);
	}
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_session_derived(struct wg_peer *peer)
{
	struct epoch_tracker et;
	NET_EPOCH_ENTER(et);
	if (ck_pr_load_bool(&peer->p_enabled))
		callout_reset(&peer->p_zero_key_material,
		    MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
		    wg_timers_run_zero_key_material, peer);
	NET_EPOCH_EXIT(et);
}

static void
wg_timers_event_want_initiation(struct wg_peer *peer)
{
	struct epoch_tracker et;
NET_EPOCH_ENTER(et); 1087*744bfb21SJohn Baldwin if (ck_pr_load_bool(&peer->p_enabled)) 1088*744bfb21SJohn Baldwin wg_timers_run_send_initiation(peer, false); 1089*744bfb21SJohn Baldwin NET_EPOCH_EXIT(et); 1090*744bfb21SJohn Baldwin } 1091*744bfb21SJohn Baldwin 1092*744bfb21SJohn Baldwin static void 1093*744bfb21SJohn Baldwin wg_timers_run_send_initiation(struct wg_peer *peer, bool is_retry) 1094*744bfb21SJohn Baldwin { 1095*744bfb21SJohn Baldwin if (!is_retry) 1096*744bfb21SJohn Baldwin peer->p_handshake_retries = 0; 1097*744bfb21SJohn Baldwin if (noise_remote_initiation_expired(peer->p_remote) == ETIMEDOUT) 1098*744bfb21SJohn Baldwin wg_send_initiation(peer); 1099*744bfb21SJohn Baldwin } 1100*744bfb21SJohn Baldwin 1101*744bfb21SJohn Baldwin static void 1102*744bfb21SJohn Baldwin wg_timers_run_retry_handshake(void *_peer) 1103*744bfb21SJohn Baldwin { 1104*744bfb21SJohn Baldwin struct epoch_tracker et; 1105*744bfb21SJohn Baldwin struct wg_peer *peer = _peer; 1106*744bfb21SJohn Baldwin 1107*744bfb21SJohn Baldwin mtx_lock(&peer->p_handshake_mtx); 1108*744bfb21SJohn Baldwin if (peer->p_handshake_retries <= MAX_TIMER_HANDSHAKES) { 1109*744bfb21SJohn Baldwin peer->p_handshake_retries++; 1110*744bfb21SJohn Baldwin mtx_unlock(&peer->p_handshake_mtx); 1111*744bfb21SJohn Baldwin 1112*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete " 1113*744bfb21SJohn Baldwin "after %d seconds, retrying (try %d)\n", peer->p_id, 1114*744bfb21SJohn Baldwin REKEY_TIMEOUT, peer->p_handshake_retries + 1); 1115*744bfb21SJohn Baldwin wg_peer_clear_src(peer); 1116*744bfb21SJohn Baldwin wg_timers_run_send_initiation(peer, true); 1117*744bfb21SJohn Baldwin } else { 1118*744bfb21SJohn Baldwin mtx_unlock(&peer->p_handshake_mtx); 1119*744bfb21SJohn Baldwin 1120*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Handshake for peer %" PRIu64 " did not complete " 1121*744bfb21SJohn Baldwin "after %d retries, giving up\n", peer->p_id, 1122*744bfb21SJohn Baldwin MAX_TIMER_HANDSHAKES + 2); 1123*744bfb21SJohn Baldwin 1124*744bfb21SJohn Baldwin callout_stop(&peer->p_send_keepalive); 1125*744bfb21SJohn Baldwin wg_queue_purge(&peer->p_stage_queue); 1126*744bfb21SJohn Baldwin NET_EPOCH_ENTER(et); 1127*744bfb21SJohn Baldwin if (ck_pr_load_bool(&peer->p_enabled) && 1128*744bfb21SJohn Baldwin !callout_pending(&peer->p_zero_key_material)) 1129*744bfb21SJohn Baldwin callout_reset(&peer->p_zero_key_material, 1130*744bfb21SJohn Baldwin MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000), 1131*744bfb21SJohn Baldwin wg_timers_run_zero_key_material, peer); 1132*744bfb21SJohn Baldwin NET_EPOCH_EXIT(et); 1133*744bfb21SJohn Baldwin } 1134*744bfb21SJohn Baldwin } 1135*744bfb21SJohn Baldwin 1136*744bfb21SJohn Baldwin static void 1137*744bfb21SJohn Baldwin wg_timers_run_send_keepalive(void *_peer) 1138*744bfb21SJohn Baldwin { 1139*744bfb21SJohn Baldwin struct epoch_tracker et; 1140*744bfb21SJohn Baldwin struct wg_peer *peer = _peer; 1141*744bfb21SJohn Baldwin 1142*744bfb21SJohn Baldwin wg_send_keepalive(peer); 1143*744bfb21SJohn Baldwin NET_EPOCH_ENTER(et); 1144*744bfb21SJohn Baldwin if (ck_pr_load_bool(&peer->p_enabled) && 1145*744bfb21SJohn Baldwin ck_pr_load_bool(&peer->p_need_another_keepalive)) { 1146*744bfb21SJohn Baldwin ck_pr_store_bool(&peer->p_need_another_keepalive, false); 1147*744bfb21SJohn Baldwin callout_reset(&peer->p_send_keepalive, 1148*744bfb21SJohn Baldwin MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000), 1149*744bfb21SJohn Baldwin wg_timers_run_send_keepalive, peer); 1150*744bfb21SJohn Baldwin } 1151*744bfb21SJohn 
Baldwin NET_EPOCH_EXIT(et); 1152*744bfb21SJohn Baldwin } 1153*744bfb21SJohn Baldwin 1154*744bfb21SJohn Baldwin static void 1155*744bfb21SJohn Baldwin wg_timers_run_new_handshake(void *_peer) 1156*744bfb21SJohn Baldwin { 1157*744bfb21SJohn Baldwin struct wg_peer *peer = _peer; 1158*744bfb21SJohn Baldwin 1159*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Retrying handshake with peer %" PRIu64 " because we " 1160*744bfb21SJohn Baldwin "stopped hearing back after %d seconds\n", 1161*744bfb21SJohn Baldwin peer->p_id, NEW_HANDSHAKE_TIMEOUT); 1162*744bfb21SJohn Baldwin 1163*744bfb21SJohn Baldwin wg_peer_clear_src(peer); 1164*744bfb21SJohn Baldwin wg_timers_run_send_initiation(peer, false); 1165*744bfb21SJohn Baldwin } 1166*744bfb21SJohn Baldwin 1167*744bfb21SJohn Baldwin static void 1168*744bfb21SJohn Baldwin wg_timers_run_zero_key_material(void *_peer) 1169*744bfb21SJohn Baldwin { 1170*744bfb21SJohn Baldwin struct wg_peer *peer = _peer; 1171*744bfb21SJohn Baldwin 1172*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Zeroing out keys for peer %" PRIu64 ", since we " 1173*744bfb21SJohn Baldwin "haven't received a new one in %d seconds\n", 1174*744bfb21SJohn Baldwin peer->p_id, REJECT_AFTER_TIME * 3); 1175*744bfb21SJohn Baldwin noise_remote_keypairs_clear(peer->p_remote); 1176*744bfb21SJohn Baldwin } 1177*744bfb21SJohn Baldwin 1178*744bfb21SJohn Baldwin static void 1179*744bfb21SJohn Baldwin wg_timers_run_persistent_keepalive(void *_peer) 1180*744bfb21SJohn Baldwin { 1181*744bfb21SJohn Baldwin struct wg_peer *peer = _peer; 1182*744bfb21SJohn Baldwin 1183*744bfb21SJohn Baldwin if (ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0) 1184*744bfb21SJohn Baldwin wg_send_keepalive(peer); 1185*744bfb21SJohn Baldwin } 1186*744bfb21SJohn Baldwin 1187*744bfb21SJohn Baldwin /* TODO Handshake */ 1188*744bfb21SJohn Baldwin static void 1189*744bfb21SJohn Baldwin wg_peer_send_buf(struct wg_peer *peer, uint8_t *buf, size_t len) 1190*744bfb21SJohn Baldwin { 1191*744bfb21SJohn Baldwin struct wg_endpoint endpoint; 1192*744bfb21SJohn Baldwin 1193*744bfb21SJohn Baldwin counter_u64_add(peer->p_tx_bytes, len); 1194*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_traversal(peer); 1195*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_sent(peer); 1196*744bfb21SJohn Baldwin wg_peer_get_endpoint(peer, &endpoint); 1197*744bfb21SJohn Baldwin wg_send_buf(peer->p_sc, &endpoint, buf, len); 1198*744bfb21SJohn Baldwin } 1199*744bfb21SJohn Baldwin 1200*744bfb21SJohn Baldwin static void 1201*744bfb21SJohn Baldwin wg_send_initiation(struct wg_peer *peer) 1202*744bfb21SJohn Baldwin { 1203*744bfb21SJohn Baldwin struct wg_pkt_initiation pkt; 1204*744bfb21SJohn Baldwin 1205*744bfb21SJohn Baldwin if (noise_create_initiation(peer->p_remote, &pkt.s_idx, pkt.ue, 1206*744bfb21SJohn Baldwin pkt.es, pkt.ets) != 0) 1207*744bfb21SJohn Baldwin return; 1208*744bfb21SJohn Baldwin 1209*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Sending handshake initiation to peer %" PRIu64 "\n", peer->p_id); 1210*744bfb21SJohn Baldwin 1211*744bfb21SJohn Baldwin pkt.t = WG_PKT_INITIATION; 1212*744bfb21SJohn Baldwin cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt, 1213*744bfb21SJohn Baldwin sizeof(pkt) - sizeof(pkt.m)); 1214*744bfb21SJohn Baldwin wg_peer_send_buf(peer, (uint8_t *)&pkt, sizeof(pkt)); 1215*744bfb21SJohn Baldwin wg_timers_event_handshake_initiated(peer); 1216*744bfb21SJohn Baldwin } 1217*744bfb21SJohn Baldwin 1218*744bfb21SJohn Baldwin static void 1219*744bfb21SJohn Baldwin wg_send_response(struct wg_peer *peer) 1220*744bfb21SJohn 
Baldwin { 1221*744bfb21SJohn Baldwin struct wg_pkt_response pkt; 1222*744bfb21SJohn Baldwin 1223*744bfb21SJohn Baldwin if (noise_create_response(peer->p_remote, &pkt.s_idx, &pkt.r_idx, 1224*744bfb21SJohn Baldwin pkt.ue, pkt.en) != 0) 1225*744bfb21SJohn Baldwin return; 1226*744bfb21SJohn Baldwin 1227*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Sending handshake response to peer %" PRIu64 "\n", peer->p_id); 1228*744bfb21SJohn Baldwin 1229*744bfb21SJohn Baldwin wg_timers_event_session_derived(peer); 1230*744bfb21SJohn Baldwin pkt.t = WG_PKT_RESPONSE; 1231*744bfb21SJohn Baldwin cookie_maker_mac(&peer->p_cookie, &pkt.m, &pkt, 1232*744bfb21SJohn Baldwin sizeof(pkt)-sizeof(pkt.m)); 1233*744bfb21SJohn Baldwin wg_peer_send_buf(peer, (uint8_t*)&pkt, sizeof(pkt)); 1234*744bfb21SJohn Baldwin } 1235*744bfb21SJohn Baldwin 1236*744bfb21SJohn Baldwin static void 1237*744bfb21SJohn Baldwin wg_send_cookie(struct wg_softc *sc, struct cookie_macs *cm, uint32_t idx, 1238*744bfb21SJohn Baldwin struct wg_endpoint *e) 1239*744bfb21SJohn Baldwin { 1240*744bfb21SJohn Baldwin struct wg_pkt_cookie pkt; 1241*744bfb21SJohn Baldwin 1242*744bfb21SJohn Baldwin DPRINTF(sc, "Sending cookie response for denied handshake message\n"); 1243*744bfb21SJohn Baldwin 1244*744bfb21SJohn Baldwin pkt.t = WG_PKT_COOKIE; 1245*744bfb21SJohn Baldwin pkt.r_idx = idx; 1246*744bfb21SJohn Baldwin 1247*744bfb21SJohn Baldwin cookie_checker_create_payload(&sc->sc_cookie, cm, pkt.nonce, 1248*744bfb21SJohn Baldwin pkt.ec, &e->e_remote.r_sa); 1249*744bfb21SJohn Baldwin wg_send_buf(sc, e, (uint8_t *)&pkt, sizeof(pkt)); 1250*744bfb21SJohn Baldwin } 1251*744bfb21SJohn Baldwin 1252*744bfb21SJohn Baldwin static void 1253*744bfb21SJohn Baldwin wg_send_keepalive(struct wg_peer *peer) 1254*744bfb21SJohn Baldwin { 1255*744bfb21SJohn Baldwin struct wg_packet *pkt; 1256*744bfb21SJohn Baldwin struct mbuf *m; 1257*744bfb21SJohn Baldwin 1258*744bfb21SJohn Baldwin if (wg_queue_len(&peer->p_stage_queue) > 0) 1259*744bfb21SJohn Baldwin goto send; 1260*744bfb21SJohn Baldwin if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) 1261*744bfb21SJohn Baldwin return; 1262*744bfb21SJohn Baldwin if ((pkt = wg_packet_alloc(m)) == NULL) { 1263*744bfb21SJohn Baldwin m_freem(m); 1264*744bfb21SJohn Baldwin return; 1265*744bfb21SJohn Baldwin } 1266*744bfb21SJohn Baldwin wg_queue_push_staged(&peer->p_stage_queue, pkt); 1267*744bfb21SJohn Baldwin DPRINTF(peer->p_sc, "Sending keepalive packet to peer %" PRIu64 "\n", peer->p_id); 1268*744bfb21SJohn Baldwin send: 1269*744bfb21SJohn Baldwin wg_peer_send_staged(peer); 1270*744bfb21SJohn Baldwin } 1271*744bfb21SJohn Baldwin 1272*744bfb21SJohn Baldwin static void 1273*744bfb21SJohn Baldwin wg_handshake(struct wg_softc *sc, struct wg_packet *pkt) 1274*744bfb21SJohn Baldwin { 1275*744bfb21SJohn Baldwin struct wg_pkt_initiation *init; 1276*744bfb21SJohn Baldwin struct wg_pkt_response *resp; 1277*744bfb21SJohn Baldwin struct wg_pkt_cookie *cook; 1278*744bfb21SJohn Baldwin struct wg_endpoint *e; 1279*744bfb21SJohn Baldwin struct wg_peer *peer; 1280*744bfb21SJohn Baldwin struct mbuf *m; 1281*744bfb21SJohn Baldwin struct noise_remote *remote = NULL; 1282*744bfb21SJohn Baldwin int res; 1283*744bfb21SJohn Baldwin bool underload = false; 1284*744bfb21SJohn Baldwin static sbintime_t wg_last_underload; /* sbinuptime */ 1285*744bfb21SJohn Baldwin 1286*744bfb21SJohn Baldwin underload = wg_queue_len(&sc->sc_handshake_queue) >= MAX_QUEUED_HANDSHAKES / 8; 1287*744bfb21SJohn Baldwin if (underload) { 1288*744bfb21SJohn Baldwin wg_last_underload = getsbinuptime(); 
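		/*
		 * Handshake processing is treated as "under load" while the
		 * handshake queue holds MAX_QUEUED_HANDSHAKES / 8 or more
		 * packets, and that state is held for UNDERLOAD_TIMEOUT more
		 * seconds after the queue drains (the else branch below), so
		 * cookie enforcement does not flap on and off.  A condensed
		 * model, with a hypothetical now() in sbintime units:
		 *
		 *	busy      = qlen >= MAX_QUEUED_HANDSHAKES / 8;
		 *	sticky    = now() < last_busy + UNDERLOAD_TIMEOUT * SBT_1S;
		 *	underload = busy || sticky;
		 */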
1289*744bfb21SJohn Baldwin } else if (wg_last_underload) { 1290*744bfb21SJohn Baldwin underload = wg_last_underload + UNDERLOAD_TIMEOUT * SBT_1S > getsbinuptime(); 1291*744bfb21SJohn Baldwin if (!underload) 1292*744bfb21SJohn Baldwin wg_last_underload = 0; 1293*744bfb21SJohn Baldwin } 1294*744bfb21SJohn Baldwin 1295*744bfb21SJohn Baldwin m = pkt->p_mbuf; 1296*744bfb21SJohn Baldwin e = &pkt->p_endpoint; 1297*744bfb21SJohn Baldwin 1298*744bfb21SJohn Baldwin if ((pkt->p_mbuf = m = m_pullup(m, m->m_pkthdr.len)) == NULL) 1299*744bfb21SJohn Baldwin goto error; 1300*744bfb21SJohn Baldwin 1301*744bfb21SJohn Baldwin switch (*mtod(m, uint32_t *)) { 1302*744bfb21SJohn Baldwin case WG_PKT_INITIATION: 1303*744bfb21SJohn Baldwin init = mtod(m, struct wg_pkt_initiation *); 1304*744bfb21SJohn Baldwin 1305*744bfb21SJohn Baldwin res = cookie_checker_validate_macs(&sc->sc_cookie, &init->m, 1306*744bfb21SJohn Baldwin init, sizeof(*init) - sizeof(init->m), 1307*744bfb21SJohn Baldwin underload, &e->e_remote.r_sa, 1308*744bfb21SJohn Baldwin sc->sc_ifp->if_vnet); 1309*744bfb21SJohn Baldwin 1310*744bfb21SJohn Baldwin if (res == EINVAL) { 1311*744bfb21SJohn Baldwin DPRINTF(sc, "Invalid initiation MAC\n"); 1312*744bfb21SJohn Baldwin goto error; 1313*744bfb21SJohn Baldwin } else if (res == ECONNREFUSED) { 1314*744bfb21SJohn Baldwin DPRINTF(sc, "Handshake ratelimited\n"); 1315*744bfb21SJohn Baldwin goto error; 1316*744bfb21SJohn Baldwin } else if (res == EAGAIN) { 1317*744bfb21SJohn Baldwin wg_send_cookie(sc, &init->m, init->s_idx, e); 1318*744bfb21SJohn Baldwin goto error; 1319*744bfb21SJohn Baldwin } else if (res != 0) { 1320*744bfb21SJohn Baldwin panic("unexpected response: %d\n", res); 1321*744bfb21SJohn Baldwin } 1322*744bfb21SJohn Baldwin 1323*744bfb21SJohn Baldwin if (noise_consume_initiation(sc->sc_local, &remote, 1324*744bfb21SJohn Baldwin init->s_idx, init->ue, init->es, init->ets) != 0) { 1325*744bfb21SJohn Baldwin DPRINTF(sc, "Invalid handshake initiation\n"); 1326*744bfb21SJohn Baldwin goto error; 1327*744bfb21SJohn Baldwin } 1328*744bfb21SJohn Baldwin 1329*744bfb21SJohn Baldwin peer = noise_remote_arg(remote); 1330*744bfb21SJohn Baldwin 1331*744bfb21SJohn Baldwin DPRINTF(sc, "Receiving handshake initiation from peer %" PRIu64 "\n", peer->p_id); 1332*744bfb21SJohn Baldwin 1333*744bfb21SJohn Baldwin wg_peer_set_endpoint(peer, e); 1334*744bfb21SJohn Baldwin wg_send_response(peer); 1335*744bfb21SJohn Baldwin break; 1336*744bfb21SJohn Baldwin case WG_PKT_RESPONSE: 1337*744bfb21SJohn Baldwin resp = mtod(m, struct wg_pkt_response *); 1338*744bfb21SJohn Baldwin 1339*744bfb21SJohn Baldwin res = cookie_checker_validate_macs(&sc->sc_cookie, &resp->m, 1340*744bfb21SJohn Baldwin resp, sizeof(*resp) - sizeof(resp->m), 1341*744bfb21SJohn Baldwin underload, &e->e_remote.r_sa, 1342*744bfb21SJohn Baldwin sc->sc_ifp->if_vnet); 1343*744bfb21SJohn Baldwin 1344*744bfb21SJohn Baldwin if (res == EINVAL) { 1345*744bfb21SJohn Baldwin DPRINTF(sc, "Invalid response MAC\n"); 1346*744bfb21SJohn Baldwin goto error; 1347*744bfb21SJohn Baldwin } else if (res == ECONNREFUSED) { 1348*744bfb21SJohn Baldwin DPRINTF(sc, "Handshake ratelimited\n"); 1349*744bfb21SJohn Baldwin goto error; 1350*744bfb21SJohn Baldwin } else if (res == EAGAIN) { 1351*744bfb21SJohn Baldwin wg_send_cookie(sc, &resp->m, resp->s_idx, e); 1352*744bfb21SJohn Baldwin goto error; 1353*744bfb21SJohn Baldwin } else if (res != 0) { 1354*744bfb21SJohn Baldwin panic("unexpected response: %d\n", res); 1355*744bfb21SJohn Baldwin } 1356*744bfb21SJohn Baldwin 1357*744bfb21SJohn 
Baldwin if (noise_consume_response(sc->sc_local, &remote, 1358*744bfb21SJohn Baldwin resp->s_idx, resp->r_idx, resp->ue, resp->en) != 0) { 1359*744bfb21SJohn Baldwin DPRINTF(sc, "Invalid handshake response\n"); 1360*744bfb21SJohn Baldwin goto error; 1361*744bfb21SJohn Baldwin } 1362*744bfb21SJohn Baldwin 1363*744bfb21SJohn Baldwin peer = noise_remote_arg(remote); 1364*744bfb21SJohn Baldwin DPRINTF(sc, "Receiving handshake response from peer %" PRIu64 "\n", peer->p_id); 1365*744bfb21SJohn Baldwin 1366*744bfb21SJohn Baldwin wg_peer_set_endpoint(peer, e); 1367*744bfb21SJohn Baldwin wg_timers_event_session_derived(peer); 1368*744bfb21SJohn Baldwin wg_timers_event_handshake_complete(peer); 1369*744bfb21SJohn Baldwin break; 1370*744bfb21SJohn Baldwin case WG_PKT_COOKIE: 1371*744bfb21SJohn Baldwin cook = mtod(m, struct wg_pkt_cookie *); 1372*744bfb21SJohn Baldwin 1373*744bfb21SJohn Baldwin if ((remote = noise_remote_index(sc->sc_local, cook->r_idx)) == NULL) { 1374*744bfb21SJohn Baldwin DPRINTF(sc, "Unknown cookie index\n"); 1375*744bfb21SJohn Baldwin goto error; 1376*744bfb21SJohn Baldwin } 1377*744bfb21SJohn Baldwin 1378*744bfb21SJohn Baldwin peer = noise_remote_arg(remote); 1379*744bfb21SJohn Baldwin 1380*744bfb21SJohn Baldwin if (cookie_maker_consume_payload(&peer->p_cookie, 1381*744bfb21SJohn Baldwin cook->nonce, cook->ec) == 0) { 1382*744bfb21SJohn Baldwin DPRINTF(sc, "Receiving cookie response\n"); 1383*744bfb21SJohn Baldwin } else { 1384*744bfb21SJohn Baldwin DPRINTF(sc, "Could not decrypt cookie response\n"); 1385*744bfb21SJohn Baldwin goto error; 1386*744bfb21SJohn Baldwin } 1387*744bfb21SJohn Baldwin 1388*744bfb21SJohn Baldwin goto not_authenticated; 1389*744bfb21SJohn Baldwin default: 1390*744bfb21SJohn Baldwin panic("invalid packet in handshake queue"); 1391*744bfb21SJohn Baldwin } 1392*744bfb21SJohn Baldwin 1393*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_received(peer); 1394*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_traversal(peer); 1395*744bfb21SJohn Baldwin 1396*744bfb21SJohn Baldwin not_authenticated: 1397*744bfb21SJohn Baldwin counter_u64_add(peer->p_rx_bytes, m->m_pkthdr.len); 1398*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 1399*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 1400*744bfb21SJohn Baldwin error: 1401*744bfb21SJohn Baldwin if (remote != NULL) 1402*744bfb21SJohn Baldwin noise_remote_put(remote); 1403*744bfb21SJohn Baldwin wg_packet_free(pkt); 1404*744bfb21SJohn Baldwin } 1405*744bfb21SJohn Baldwin 1406*744bfb21SJohn Baldwin static void 1407*744bfb21SJohn Baldwin wg_softc_handshake_receive(struct wg_softc *sc) 1408*744bfb21SJohn Baldwin { 1409*744bfb21SJohn Baldwin struct wg_packet *pkt; 1410*744bfb21SJohn Baldwin while ((pkt = wg_queue_dequeue_handshake(&sc->sc_handshake_queue)) != NULL) 1411*744bfb21SJohn Baldwin wg_handshake(sc, pkt); 1412*744bfb21SJohn Baldwin } 1413*744bfb21SJohn Baldwin 1414*744bfb21SJohn Baldwin static void 1415*744bfb21SJohn Baldwin wg_mbuf_reset(struct mbuf *m) 1416*744bfb21SJohn Baldwin { 1417*744bfb21SJohn Baldwin 1418*744bfb21SJohn Baldwin struct m_tag *t, *tmp; 1419*744bfb21SJohn Baldwin 1420*744bfb21SJohn Baldwin /* 1421*744bfb21SJohn Baldwin * We want to reset the mbuf to a newly allocated state, containing 1422*744bfb21SJohn Baldwin * just the packet contents. Unfortunately FreeBSD doesn't seem to 1423*744bfb21SJohn Baldwin * offer this anywhere, so we have to make it up as we go. 
 * If we can get this in kern/kern_mbuf.c, that would be best.
 *
 * Notice: this may break things unexpectedly but it is better to fail
 * closed in the extreme case than leak information in every case.
 *
 * With that said, all this attempts to do is remove any extraneous
 * information that could be present.
 */

	M_ASSERTPKTHDR(m);

	m->m_flags &= ~(M_BCAST|M_MCAST|M_VLANTAG|M_PROMISC|M_PROTOFLAGS);

	M_HASHTYPE_CLEAR(m);
#ifdef NUMA
	m->m_pkthdr.numa_domain = M_NODOM;
#endif
	SLIST_FOREACH_SAFE(t, &m->m_pkthdr.tags, m_tag_link, tmp) {
		if ((t->m_tag_id != 0 || t->m_tag_cookie != MTAG_WGLOOP) &&
		    t->m_tag_id != PACKET_TAG_MACLABEL)
			m_tag_delete(m, t);
	}

	KASSERT((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0,
	    ("%s: mbuf %p has a send tag", __func__, m));

	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.PH_per.sixtyfour[0] = 0;
	m->m_pkthdr.PH_loc.sixtyfour[0] = 0;
}

static inline unsigned int
calculate_padding(struct wg_packet *pkt)
{
	unsigned int padded_size, last_unit = pkt->p_mbuf->m_pkthdr.len;

	if (__predict_false(!pkt->p_mtu))
		return (last_unit + (WG_PKT_PADDING - 1)) & ~(WG_PKT_PADDING - 1);

	if (__predict_false(last_unit > pkt->p_mtu))
		last_unit %= pkt->p_mtu;

	padded_size = (last_unit + (WG_PKT_PADDING - 1)) & ~(WG_PKT_PADDING - 1);
	if (pkt->p_mtu < padded_size)
		padded_size = pkt->p_mtu;
	return padded_size - last_unit;
}

static void
wg_encrypt(struct wg_softc *sc, struct wg_packet *pkt)
{
	static const uint8_t padding[WG_PKT_PADDING] = { 0 };
	struct wg_pkt_data *data;
	struct wg_peer *peer;
	struct noise_remote *remote;
	struct mbuf *m;
	uint32_t idx;
	unsigned int padlen;
	enum wg_ring_state state = WG_PACKET_DEAD;

	remote = noise_keypair_remote(pkt->p_keypair);
	peer = noise_remote_arg(remote);
	m = pkt->p_mbuf;

	/* Pad the packet */
	padlen =
calculate_padding(pkt); 1491*744bfb21SJohn Baldwin if (padlen != 0 && !m_append(m, padlen, padding)) 1492*744bfb21SJohn Baldwin goto out; 1493*744bfb21SJohn Baldwin 1494*744bfb21SJohn Baldwin /* Do encryption */ 1495*744bfb21SJohn Baldwin if (noise_keypair_encrypt(pkt->p_keypair, &idx, pkt->p_nonce, m) != 0) 1496*744bfb21SJohn Baldwin goto out; 1497*744bfb21SJohn Baldwin 1498*744bfb21SJohn Baldwin /* Put header into packet */ 1499*744bfb21SJohn Baldwin M_PREPEND(m, sizeof(struct wg_pkt_data), M_NOWAIT); 1500*744bfb21SJohn Baldwin if (m == NULL) 1501*744bfb21SJohn Baldwin goto out; 1502*744bfb21SJohn Baldwin data = mtod(m, struct wg_pkt_data *); 1503*744bfb21SJohn Baldwin data->t = WG_PKT_DATA; 1504*744bfb21SJohn Baldwin data->r_idx = idx; 1505*744bfb21SJohn Baldwin data->nonce = htole64(pkt->p_nonce); 1506*744bfb21SJohn Baldwin 1507*744bfb21SJohn Baldwin wg_mbuf_reset(m); 1508*744bfb21SJohn Baldwin state = WG_PACKET_CRYPTED; 1509*744bfb21SJohn Baldwin out: 1510*744bfb21SJohn Baldwin pkt->p_mbuf = m; 1511*744bfb21SJohn Baldwin wmb(); 1512*744bfb21SJohn Baldwin pkt->p_state = state; 1513*744bfb21SJohn Baldwin GROUPTASK_ENQUEUE(&peer->p_send); 1514*744bfb21SJohn Baldwin noise_remote_put(remote); 1515*744bfb21SJohn Baldwin } 1516*744bfb21SJohn Baldwin 1517*744bfb21SJohn Baldwin static void 1518*744bfb21SJohn Baldwin wg_decrypt(struct wg_softc *sc, struct wg_packet *pkt) 1519*744bfb21SJohn Baldwin { 1520*744bfb21SJohn Baldwin struct wg_peer *peer, *allowed_peer; 1521*744bfb21SJohn Baldwin struct noise_remote *remote; 1522*744bfb21SJohn Baldwin struct mbuf *m; 1523*744bfb21SJohn Baldwin int len; 1524*744bfb21SJohn Baldwin enum wg_ring_state state = WG_PACKET_DEAD; 1525*744bfb21SJohn Baldwin 1526*744bfb21SJohn Baldwin remote = noise_keypair_remote(pkt->p_keypair); 1527*744bfb21SJohn Baldwin peer = noise_remote_arg(remote); 1528*744bfb21SJohn Baldwin m = pkt->p_mbuf; 1529*744bfb21SJohn Baldwin 1530*744bfb21SJohn Baldwin /* Read nonce and then adjust to remove the header. */ 1531*744bfb21SJohn Baldwin pkt->p_nonce = le64toh(mtod(m, struct wg_pkt_data *)->nonce); 1532*744bfb21SJohn Baldwin m_adj(m, sizeof(struct wg_pkt_data)); 1533*744bfb21SJohn Baldwin 1534*744bfb21SJohn Baldwin if (noise_keypair_decrypt(pkt->p_keypair, pkt->p_nonce, m) != 0) 1535*744bfb21SJohn Baldwin goto out; 1536*744bfb21SJohn Baldwin 1537*744bfb21SJohn Baldwin /* A packet with length 0 is a keepalive packet */ 1538*744bfb21SJohn Baldwin if (__predict_false(m->m_pkthdr.len == 0)) { 1539*744bfb21SJohn Baldwin DPRINTF(sc, "Receiving keepalive packet from peer " 1540*744bfb21SJohn Baldwin "%" PRIu64 "\n", peer->p_id); 1541*744bfb21SJohn Baldwin state = WG_PACKET_CRYPTED; 1542*744bfb21SJohn Baldwin goto out; 1543*744bfb21SJohn Baldwin } 1544*744bfb21SJohn Baldwin 1545*744bfb21SJohn Baldwin /* 1546*744bfb21SJohn Baldwin * We can let the network stack handle the intricate validation of the 1547*744bfb21SJohn Baldwin * IP header, we just worry about the sizeof and the version, so we can 1548*744bfb21SJohn Baldwin * read the source address in wg_aip_lookup. 
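 *
 * Once the version is known, two more things happen below: the inner source
 * address is looked up in the allowed-IPs table and must resolve to the very
 * peer the packet was decrypted with (cryptokey routing on receive), and the
 * length taken from the IP header is used to trim off any padding the sender
 * appended via calculate_padding() before the mbuf is handed to the stack.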
1549*744bfb21SJohn Baldwin */ 1550*744bfb21SJohn Baldwin 1551*744bfb21SJohn Baldwin if (determine_af_and_pullup(&m, &pkt->p_af) == 0) { 1552*744bfb21SJohn Baldwin if (pkt->p_af == AF_INET) { 1553*744bfb21SJohn Baldwin struct ip *ip = mtod(m, struct ip *); 1554*744bfb21SJohn Baldwin allowed_peer = wg_aip_lookup(sc, AF_INET, &ip->ip_src); 1555*744bfb21SJohn Baldwin len = ntohs(ip->ip_len); 1556*744bfb21SJohn Baldwin if (len >= sizeof(struct ip) && len < m->m_pkthdr.len) 1557*744bfb21SJohn Baldwin m_adj(m, len - m->m_pkthdr.len); 1558*744bfb21SJohn Baldwin } else if (pkt->p_af == AF_INET6) { 1559*744bfb21SJohn Baldwin struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); 1560*744bfb21SJohn Baldwin allowed_peer = wg_aip_lookup(sc, AF_INET6, &ip6->ip6_src); 1561*744bfb21SJohn Baldwin len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); 1562*744bfb21SJohn Baldwin if (len < m->m_pkthdr.len) 1563*744bfb21SJohn Baldwin m_adj(m, len - m->m_pkthdr.len); 1564*744bfb21SJohn Baldwin } else 1565*744bfb21SJohn Baldwin panic("determine_af_and_pullup returned unexpected value"); 1566*744bfb21SJohn Baldwin } else { 1567*744bfb21SJohn Baldwin DPRINTF(sc, "Packet is neither ipv4 nor ipv6 from peer %" PRIu64 "\n", peer->p_id); 1568*744bfb21SJohn Baldwin goto out; 1569*744bfb21SJohn Baldwin } 1570*744bfb21SJohn Baldwin 1571*744bfb21SJohn Baldwin /* We only want to compare the address, not dereference, so drop the ref. */ 1572*744bfb21SJohn Baldwin if (allowed_peer != NULL) 1573*744bfb21SJohn Baldwin noise_remote_put(allowed_peer->p_remote); 1574*744bfb21SJohn Baldwin 1575*744bfb21SJohn Baldwin if (__predict_false(peer != allowed_peer)) { 1576*744bfb21SJohn Baldwin DPRINTF(sc, "Packet has unallowed src IP from peer %" PRIu64 "\n", peer->p_id); 1577*744bfb21SJohn Baldwin goto out; 1578*744bfb21SJohn Baldwin } 1579*744bfb21SJohn Baldwin 1580*744bfb21SJohn Baldwin wg_mbuf_reset(m); 1581*744bfb21SJohn Baldwin state = WG_PACKET_CRYPTED; 1582*744bfb21SJohn Baldwin out: 1583*744bfb21SJohn Baldwin pkt->p_mbuf = m; 1584*744bfb21SJohn Baldwin wmb(); 1585*744bfb21SJohn Baldwin pkt->p_state = state; 1586*744bfb21SJohn Baldwin GROUPTASK_ENQUEUE(&peer->p_recv); 1587*744bfb21SJohn Baldwin noise_remote_put(remote); 1588*744bfb21SJohn Baldwin } 1589*744bfb21SJohn Baldwin 1590*744bfb21SJohn Baldwin static void 1591*744bfb21SJohn Baldwin wg_softc_decrypt(struct wg_softc *sc) 1592*744bfb21SJohn Baldwin { 1593*744bfb21SJohn Baldwin struct wg_packet *pkt; 1594*744bfb21SJohn Baldwin 1595*744bfb21SJohn Baldwin while ((pkt = wg_queue_dequeue_parallel(&sc->sc_decrypt_parallel)) != NULL) 1596*744bfb21SJohn Baldwin wg_decrypt(sc, pkt); 1597*744bfb21SJohn Baldwin } 1598*744bfb21SJohn Baldwin 1599*744bfb21SJohn Baldwin static void 1600*744bfb21SJohn Baldwin wg_softc_encrypt(struct wg_softc *sc) 1601*744bfb21SJohn Baldwin { 1602*744bfb21SJohn Baldwin struct wg_packet *pkt; 1603*744bfb21SJohn Baldwin 1604*744bfb21SJohn Baldwin while ((pkt = wg_queue_dequeue_parallel(&sc->sc_encrypt_parallel)) != NULL) 1605*744bfb21SJohn Baldwin wg_encrypt(sc, pkt); 1606*744bfb21SJohn Baldwin } 1607*744bfb21SJohn Baldwin 1608*744bfb21SJohn Baldwin static void 1609*744bfb21SJohn Baldwin wg_encrypt_dispatch(struct wg_softc *sc) 1610*744bfb21SJohn Baldwin { 1611*744bfb21SJohn Baldwin /* 1612*744bfb21SJohn Baldwin * The update to encrypt_last_cpu is racey such that we may 1613*744bfb21SJohn Baldwin * reschedule the task for the same CPU multiple times, but 1614*744bfb21SJohn Baldwin * the race doesn't really matter. 
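 *
 * The goal is only to spread crypto work round-robin across the per-CPU
 * encrypt/decrypt tasks; losing the race at worst repeats a CPU, which
 * affects load spreading rather than correctness.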
1615*744bfb21SJohn Baldwin */ 1616*744bfb21SJohn Baldwin u_int cpu = (sc->sc_encrypt_last_cpu + 1) % mp_ncpus; 1617*744bfb21SJohn Baldwin sc->sc_encrypt_last_cpu = cpu; 1618*744bfb21SJohn Baldwin GROUPTASK_ENQUEUE(&sc->sc_encrypt[cpu]); 1619*744bfb21SJohn Baldwin } 1620*744bfb21SJohn Baldwin 1621*744bfb21SJohn Baldwin static void 1622*744bfb21SJohn Baldwin wg_decrypt_dispatch(struct wg_softc *sc) 1623*744bfb21SJohn Baldwin { 1624*744bfb21SJohn Baldwin u_int cpu = (sc->sc_decrypt_last_cpu + 1) % mp_ncpus; 1625*744bfb21SJohn Baldwin sc->sc_decrypt_last_cpu = cpu; 1626*744bfb21SJohn Baldwin GROUPTASK_ENQUEUE(&sc->sc_decrypt[cpu]); 1627*744bfb21SJohn Baldwin } 1628*744bfb21SJohn Baldwin 1629*744bfb21SJohn Baldwin static void 1630*744bfb21SJohn Baldwin wg_deliver_out(struct wg_peer *peer) 1631*744bfb21SJohn Baldwin { 1632*744bfb21SJohn Baldwin struct wg_endpoint endpoint; 1633*744bfb21SJohn Baldwin struct wg_softc *sc = peer->p_sc; 1634*744bfb21SJohn Baldwin struct wg_packet *pkt; 1635*744bfb21SJohn Baldwin struct mbuf *m; 1636*744bfb21SJohn Baldwin int rc, len; 1637*744bfb21SJohn Baldwin 1638*744bfb21SJohn Baldwin wg_peer_get_endpoint(peer, &endpoint); 1639*744bfb21SJohn Baldwin 1640*744bfb21SJohn Baldwin while ((pkt = wg_queue_dequeue_serial(&peer->p_encrypt_serial)) != NULL) { 1641*744bfb21SJohn Baldwin if (pkt->p_state != WG_PACKET_CRYPTED) 1642*744bfb21SJohn Baldwin goto error; 1643*744bfb21SJohn Baldwin 1644*744bfb21SJohn Baldwin m = pkt->p_mbuf; 1645*744bfb21SJohn Baldwin pkt->p_mbuf = NULL; 1646*744bfb21SJohn Baldwin 1647*744bfb21SJohn Baldwin len = m->m_pkthdr.len; 1648*744bfb21SJohn Baldwin 1649*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_traversal(peer); 1650*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_sent(peer); 1651*744bfb21SJohn Baldwin rc = wg_send(sc, &endpoint, m); 1652*744bfb21SJohn Baldwin if (rc == 0) { 1653*744bfb21SJohn Baldwin if (len > (sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN)) 1654*744bfb21SJohn Baldwin wg_timers_event_data_sent(peer); 1655*744bfb21SJohn Baldwin counter_u64_add(peer->p_tx_bytes, len); 1656*744bfb21SJohn Baldwin } else if (rc == EADDRNOTAVAIL) { 1657*744bfb21SJohn Baldwin wg_peer_clear_src(peer); 1658*744bfb21SJohn Baldwin wg_peer_get_endpoint(peer, &endpoint); 1659*744bfb21SJohn Baldwin goto error; 1660*744bfb21SJohn Baldwin } else { 1661*744bfb21SJohn Baldwin goto error; 1662*744bfb21SJohn Baldwin } 1663*744bfb21SJohn Baldwin wg_packet_free(pkt); 1664*744bfb21SJohn Baldwin if (noise_keep_key_fresh_send(peer->p_remote)) 1665*744bfb21SJohn Baldwin wg_timers_event_want_initiation(peer); 1666*744bfb21SJohn Baldwin continue; 1667*744bfb21SJohn Baldwin error: 1668*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 1669*744bfb21SJohn Baldwin wg_packet_free(pkt); 1670*744bfb21SJohn Baldwin } 1671*744bfb21SJohn Baldwin } 1672*744bfb21SJohn Baldwin 1673*744bfb21SJohn Baldwin static void 1674*744bfb21SJohn Baldwin wg_deliver_in(struct wg_peer *peer) 1675*744bfb21SJohn Baldwin { 1676*744bfb21SJohn Baldwin struct wg_softc *sc = peer->p_sc; 1677*744bfb21SJohn Baldwin struct ifnet *ifp = sc->sc_ifp; 1678*744bfb21SJohn Baldwin struct wg_packet *pkt; 1679*744bfb21SJohn Baldwin struct mbuf *m; 1680*744bfb21SJohn Baldwin struct epoch_tracker et; 1681*744bfb21SJohn Baldwin 1682*744bfb21SJohn Baldwin while ((pkt = wg_queue_dequeue_serial(&peer->p_decrypt_serial)) != NULL) { 1683*744bfb21SJohn Baldwin if (pkt->p_state != WG_PACKET_CRYPTED) 1684*744bfb21SJohn Baldwin goto error; 1685*744bfb21SJohn Baldwin 
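		/*
		 * Delivery order is preserved even though decryption runs in
		 * parallel: packets sit on both the per-peer serial queue and
		 * the shared parallel queue (wg_queue_both), and the serial
		 * dequeue in the loop condition only returns the head once a
		 * crypto worker has marked it WG_PACKET_CRYPTED or
		 * WG_PACKET_DEAD, so a slow worker stalls rather than reorders
		 * this loop.
		 */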
1686*744bfb21SJohn Baldwin m = pkt->p_mbuf; 1687*744bfb21SJohn Baldwin if (noise_keypair_nonce_check(pkt->p_keypair, pkt->p_nonce) != 0) 1688*744bfb21SJohn Baldwin goto error; 1689*744bfb21SJohn Baldwin 1690*744bfb21SJohn Baldwin if (noise_keypair_received_with(pkt->p_keypair) == ECONNRESET) 1691*744bfb21SJohn Baldwin wg_timers_event_handshake_complete(peer); 1692*744bfb21SJohn Baldwin 1693*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_received(peer); 1694*744bfb21SJohn Baldwin wg_timers_event_any_authenticated_packet_traversal(peer); 1695*744bfb21SJohn Baldwin wg_peer_set_endpoint(peer, &pkt->p_endpoint); 1696*744bfb21SJohn Baldwin 1697*744bfb21SJohn Baldwin counter_u64_add(peer->p_rx_bytes, m->m_pkthdr.len + 1698*744bfb21SJohn Baldwin sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN); 1699*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 1700*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len + 1701*744bfb21SJohn Baldwin sizeof(struct wg_pkt_data) + NOISE_AUTHTAG_LEN); 1702*744bfb21SJohn Baldwin 1703*744bfb21SJohn Baldwin if (m->m_pkthdr.len == 0) 1704*744bfb21SJohn Baldwin goto done; 1705*744bfb21SJohn Baldwin 1706*744bfb21SJohn Baldwin MPASS(pkt->p_af == AF_INET || pkt->p_af == AF_INET6); 1707*744bfb21SJohn Baldwin pkt->p_mbuf = NULL; 1708*744bfb21SJohn Baldwin 1709*744bfb21SJohn Baldwin m->m_pkthdr.rcvif = ifp; 1710*744bfb21SJohn Baldwin 1711*744bfb21SJohn Baldwin NET_EPOCH_ENTER(et); 1712*744bfb21SJohn Baldwin BPF_MTAP2_AF(ifp, m, pkt->p_af); 1713*744bfb21SJohn Baldwin 1714*744bfb21SJohn Baldwin CURVNET_SET(ifp->if_vnet); 1715*744bfb21SJohn Baldwin M_SETFIB(m, ifp->if_fib); 1716*744bfb21SJohn Baldwin if (pkt->p_af == AF_INET) 1717*744bfb21SJohn Baldwin netisr_dispatch(NETISR_IP, m); 1718*744bfb21SJohn Baldwin if (pkt->p_af == AF_INET6) 1719*744bfb21SJohn Baldwin netisr_dispatch(NETISR_IPV6, m); 1720*744bfb21SJohn Baldwin CURVNET_RESTORE(); 1721*744bfb21SJohn Baldwin NET_EPOCH_EXIT(et); 1722*744bfb21SJohn Baldwin 1723*744bfb21SJohn Baldwin wg_timers_event_data_received(peer); 1724*744bfb21SJohn Baldwin 1725*744bfb21SJohn Baldwin done: 1726*744bfb21SJohn Baldwin if (noise_keep_key_fresh_recv(peer->p_remote)) 1727*744bfb21SJohn Baldwin wg_timers_event_want_initiation(peer); 1728*744bfb21SJohn Baldwin wg_packet_free(pkt); 1729*744bfb21SJohn Baldwin continue; 1730*744bfb21SJohn Baldwin error: 1731*744bfb21SJohn Baldwin if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1732*744bfb21SJohn Baldwin wg_packet_free(pkt); 1733*744bfb21SJohn Baldwin } 1734*744bfb21SJohn Baldwin } 1735*744bfb21SJohn Baldwin 1736*744bfb21SJohn Baldwin static struct wg_packet * 1737*744bfb21SJohn Baldwin wg_packet_alloc(struct mbuf *m) 1738*744bfb21SJohn Baldwin { 1739*744bfb21SJohn Baldwin struct wg_packet *pkt; 1740*744bfb21SJohn Baldwin 1741*744bfb21SJohn Baldwin if ((pkt = uma_zalloc(wg_packet_zone, M_NOWAIT | M_ZERO)) == NULL) 1742*744bfb21SJohn Baldwin return (NULL); 1743*744bfb21SJohn Baldwin pkt->p_mbuf = m; 1744*744bfb21SJohn Baldwin return (pkt); 1745*744bfb21SJohn Baldwin } 1746*744bfb21SJohn Baldwin 1747*744bfb21SJohn Baldwin static void 1748*744bfb21SJohn Baldwin wg_packet_free(struct wg_packet *pkt) 1749*744bfb21SJohn Baldwin { 1750*744bfb21SJohn Baldwin if (pkt->p_keypair != NULL) 1751*744bfb21SJohn Baldwin noise_keypair_put(pkt->p_keypair); 1752*744bfb21SJohn Baldwin if (pkt->p_mbuf != NULL) 1753*744bfb21SJohn Baldwin m_freem(pkt->p_mbuf); 1754*744bfb21SJohn Baldwin uma_zfree(wg_packet_zone, pkt); 1755*744bfb21SJohn Baldwin } 
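/*
 * The queue helpers below implement ordered parallel crypto.  Each data
 * packet is placed on two queues at once: a per-peer "serial" queue that
 * fixes delivery order and a per-softc "parallel" queue that any CPU's
 * crypto task may drain.  A minimal sketch of one packet's life, using the
 * helpers defined below:
 *
 *	wg_queue_both(parallel, serial, pkt);     (state = WG_PACKET_UNCRYPTED)
 *	wg_queue_dequeue_parallel(parallel);      (a crypto task picks it up)
 *	pkt->p_state = WG_PACKET_CRYPTED;         (or WG_PACKET_DEAD on failure)
 *	wg_queue_dequeue_serial(serial);          (NULL until the head is done)
 *
 * A packet that fails crypto is still handed back, as WG_PACKET_DEAD, so the
 * per-peer consumer can drop it without stalling the queue behind it.
 */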
1756*744bfb21SJohn Baldwin 1757*744bfb21SJohn Baldwin static void 1758*744bfb21SJohn Baldwin wg_queue_init(struct wg_queue *queue, const char *name) 1759*744bfb21SJohn Baldwin { 1760*744bfb21SJohn Baldwin mtx_init(&queue->q_mtx, name, NULL, MTX_DEF); 1761*744bfb21SJohn Baldwin STAILQ_INIT(&queue->q_queue); 1762*744bfb21SJohn Baldwin queue->q_len = 0; 1763*744bfb21SJohn Baldwin } 1764*744bfb21SJohn Baldwin 1765*744bfb21SJohn Baldwin static void 1766*744bfb21SJohn Baldwin wg_queue_deinit(struct wg_queue *queue) 1767*744bfb21SJohn Baldwin { 1768*744bfb21SJohn Baldwin wg_queue_purge(queue); 1769*744bfb21SJohn Baldwin mtx_destroy(&queue->q_mtx); 1770*744bfb21SJohn Baldwin } 1771*744bfb21SJohn Baldwin 1772*744bfb21SJohn Baldwin static size_t 1773*744bfb21SJohn Baldwin wg_queue_len(struct wg_queue *queue) 1774*744bfb21SJohn Baldwin { 1775*744bfb21SJohn Baldwin return (queue->q_len); 1776*744bfb21SJohn Baldwin } 1777*744bfb21SJohn Baldwin 1778*744bfb21SJohn Baldwin static int 1779*744bfb21SJohn Baldwin wg_queue_enqueue_handshake(struct wg_queue *hs, struct wg_packet *pkt) 1780*744bfb21SJohn Baldwin { 1781*744bfb21SJohn Baldwin int ret = 0; 1782*744bfb21SJohn Baldwin mtx_lock(&hs->q_mtx); 1783*744bfb21SJohn Baldwin if (hs->q_len < MAX_QUEUED_HANDSHAKES) { 1784*744bfb21SJohn Baldwin STAILQ_INSERT_TAIL(&hs->q_queue, pkt, p_parallel); 1785*744bfb21SJohn Baldwin hs->q_len++; 1786*744bfb21SJohn Baldwin } else { 1787*744bfb21SJohn Baldwin ret = ENOBUFS; 1788*744bfb21SJohn Baldwin } 1789*744bfb21SJohn Baldwin mtx_unlock(&hs->q_mtx); 1790*744bfb21SJohn Baldwin if (ret != 0) 1791*744bfb21SJohn Baldwin wg_packet_free(pkt); 1792*744bfb21SJohn Baldwin return (ret); 1793*744bfb21SJohn Baldwin } 1794*744bfb21SJohn Baldwin 1795*744bfb21SJohn Baldwin static struct wg_packet * 1796*744bfb21SJohn Baldwin wg_queue_dequeue_handshake(struct wg_queue *hs) 1797*744bfb21SJohn Baldwin { 1798*744bfb21SJohn Baldwin struct wg_packet *pkt; 1799*744bfb21SJohn Baldwin mtx_lock(&hs->q_mtx); 1800*744bfb21SJohn Baldwin if ((pkt = STAILQ_FIRST(&hs->q_queue)) != NULL) { 1801*744bfb21SJohn Baldwin STAILQ_REMOVE_HEAD(&hs->q_queue, p_parallel); 1802*744bfb21SJohn Baldwin hs->q_len--; 1803*744bfb21SJohn Baldwin } 1804*744bfb21SJohn Baldwin mtx_unlock(&hs->q_mtx); 1805*744bfb21SJohn Baldwin return (pkt); 1806*744bfb21SJohn Baldwin } 1807*744bfb21SJohn Baldwin 1808*744bfb21SJohn Baldwin static void 1809*744bfb21SJohn Baldwin wg_queue_push_staged(struct wg_queue *staged, struct wg_packet *pkt) 1810*744bfb21SJohn Baldwin { 1811*744bfb21SJohn Baldwin struct wg_packet *old = NULL; 1812*744bfb21SJohn Baldwin 1813*744bfb21SJohn Baldwin mtx_lock(&staged->q_mtx); 1814*744bfb21SJohn Baldwin if (staged->q_len >= MAX_STAGED_PKT) { 1815*744bfb21SJohn Baldwin old = STAILQ_FIRST(&staged->q_queue); 1816*744bfb21SJohn Baldwin STAILQ_REMOVE_HEAD(&staged->q_queue, p_parallel); 1817*744bfb21SJohn Baldwin staged->q_len--; 1818*744bfb21SJohn Baldwin } 1819*744bfb21SJohn Baldwin STAILQ_INSERT_TAIL(&staged->q_queue, pkt, p_parallel); 1820*744bfb21SJohn Baldwin staged->q_len++; 1821*744bfb21SJohn Baldwin mtx_unlock(&staged->q_mtx); 1822*744bfb21SJohn Baldwin 1823*744bfb21SJohn Baldwin if (old != NULL) 1824*744bfb21SJohn Baldwin wg_packet_free(old); 1825*744bfb21SJohn Baldwin } 1826*744bfb21SJohn Baldwin 1827*744bfb21SJohn Baldwin static void 1828*744bfb21SJohn Baldwin wg_queue_enlist_staged(struct wg_queue *staged, struct wg_packet_list *list) 1829*744bfb21SJohn Baldwin { 1830*744bfb21SJohn Baldwin struct wg_packet *pkt, *tpkt; 1831*744bfb21SJohn Baldwin 
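	/*
	 * Re-staging goes through wg_queue_push_staged() one packet at a
	 * time, so the MAX_STAGED_PKT cap still applies and the oldest staged
	 * packet is dropped when the queue is full; wg_peer_send_staged()
	 * uses this on its error path to return packets to the stage queue.
	 */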
STAILQ_FOREACH_SAFE(pkt, list, p_parallel, tpkt) 1832*744bfb21SJohn Baldwin wg_queue_push_staged(staged, pkt); 1833*744bfb21SJohn Baldwin } 1834*744bfb21SJohn Baldwin 1835*744bfb21SJohn Baldwin static void 1836*744bfb21SJohn Baldwin wg_queue_delist_staged(struct wg_queue *staged, struct wg_packet_list *list) 1837*744bfb21SJohn Baldwin { 1838*744bfb21SJohn Baldwin STAILQ_INIT(list); 1839*744bfb21SJohn Baldwin mtx_lock(&staged->q_mtx); 1840*744bfb21SJohn Baldwin STAILQ_CONCAT(list, &staged->q_queue); 1841*744bfb21SJohn Baldwin staged->q_len = 0; 1842*744bfb21SJohn Baldwin mtx_unlock(&staged->q_mtx); 1843*744bfb21SJohn Baldwin } 1844*744bfb21SJohn Baldwin 1845*744bfb21SJohn Baldwin static void 1846*744bfb21SJohn Baldwin wg_queue_purge(struct wg_queue *staged) 1847*744bfb21SJohn Baldwin { 1848*744bfb21SJohn Baldwin struct wg_packet_list list; 1849*744bfb21SJohn Baldwin struct wg_packet *pkt, *tpkt; 1850*744bfb21SJohn Baldwin wg_queue_delist_staged(staged, &list); 1851*744bfb21SJohn Baldwin STAILQ_FOREACH_SAFE(pkt, &list, p_parallel, tpkt) 1852*744bfb21SJohn Baldwin wg_packet_free(pkt); 1853*744bfb21SJohn Baldwin } 1854*744bfb21SJohn Baldwin 1855*744bfb21SJohn Baldwin static int 1856*744bfb21SJohn Baldwin wg_queue_both(struct wg_queue *parallel, struct wg_queue *serial, struct wg_packet *pkt) 1857*744bfb21SJohn Baldwin { 1858*744bfb21SJohn Baldwin pkt->p_state = WG_PACKET_UNCRYPTED; 1859*744bfb21SJohn Baldwin 1860*744bfb21SJohn Baldwin mtx_lock(&serial->q_mtx); 1861*744bfb21SJohn Baldwin if (serial->q_len < MAX_QUEUED_PKT) { 1862*744bfb21SJohn Baldwin serial->q_len++; 1863*744bfb21SJohn Baldwin STAILQ_INSERT_TAIL(&serial->q_queue, pkt, p_serial); 1864*744bfb21SJohn Baldwin } else { 1865*744bfb21SJohn Baldwin mtx_unlock(&serial->q_mtx); 1866*744bfb21SJohn Baldwin wg_packet_free(pkt); 1867*744bfb21SJohn Baldwin return (ENOBUFS); 1868*744bfb21SJohn Baldwin } 1869*744bfb21SJohn Baldwin mtx_unlock(&serial->q_mtx); 1870*744bfb21SJohn Baldwin 1871*744bfb21SJohn Baldwin mtx_lock(¶llel->q_mtx); 1872*744bfb21SJohn Baldwin if (parallel->q_len < MAX_QUEUED_PKT) { 1873*744bfb21SJohn Baldwin parallel->q_len++; 1874*744bfb21SJohn Baldwin STAILQ_INSERT_TAIL(¶llel->q_queue, pkt, p_parallel); 1875*744bfb21SJohn Baldwin } else { 1876*744bfb21SJohn Baldwin mtx_unlock(¶llel->q_mtx); 1877*744bfb21SJohn Baldwin pkt->p_state = WG_PACKET_DEAD; 1878*744bfb21SJohn Baldwin return (ENOBUFS); 1879*744bfb21SJohn Baldwin } 1880*744bfb21SJohn Baldwin mtx_unlock(¶llel->q_mtx); 1881*744bfb21SJohn Baldwin 1882*744bfb21SJohn Baldwin return (0); 1883*744bfb21SJohn Baldwin } 1884*744bfb21SJohn Baldwin 1885*744bfb21SJohn Baldwin static struct wg_packet * 1886*744bfb21SJohn Baldwin wg_queue_dequeue_serial(struct wg_queue *serial) 1887*744bfb21SJohn Baldwin { 1888*744bfb21SJohn Baldwin struct wg_packet *pkt = NULL; 1889*744bfb21SJohn Baldwin mtx_lock(&serial->q_mtx); 1890*744bfb21SJohn Baldwin if (serial->q_len > 0 && STAILQ_FIRST(&serial->q_queue)->p_state != WG_PACKET_UNCRYPTED) { 1891*744bfb21SJohn Baldwin serial->q_len--; 1892*744bfb21SJohn Baldwin pkt = STAILQ_FIRST(&serial->q_queue); 1893*744bfb21SJohn Baldwin STAILQ_REMOVE_HEAD(&serial->q_queue, p_serial); 1894*744bfb21SJohn Baldwin } 1895*744bfb21SJohn Baldwin mtx_unlock(&serial->q_mtx); 1896*744bfb21SJohn Baldwin return (pkt); 1897*744bfb21SJohn Baldwin } 1898*744bfb21SJohn Baldwin 1899*744bfb21SJohn Baldwin static struct wg_packet * 1900*744bfb21SJohn Baldwin wg_queue_dequeue_parallel(struct wg_queue *parallel) 1901*744bfb21SJohn Baldwin { 1902*744bfb21SJohn Baldwin struct 
wg_packet *pkt = NULL; 1903*744bfb21SJohn Baldwin mtx_lock(¶llel->q_mtx); 1904*744bfb21SJohn Baldwin if (parallel->q_len > 0) { 1905*744bfb21SJohn Baldwin parallel->q_len--; 1906*744bfb21SJohn Baldwin pkt = STAILQ_FIRST(¶llel->q_queue); 1907*744bfb21SJohn Baldwin STAILQ_REMOVE_HEAD(¶llel->q_queue, p_parallel); 1908*744bfb21SJohn Baldwin } 1909*744bfb21SJohn Baldwin mtx_unlock(¶llel->q_mtx); 1910*744bfb21SJohn Baldwin return (pkt); 1911*744bfb21SJohn Baldwin } 1912*744bfb21SJohn Baldwin 1913*744bfb21SJohn Baldwin static bool 1914*744bfb21SJohn Baldwin wg_input(struct mbuf *m, int offset, struct inpcb *inpcb, 1915*744bfb21SJohn Baldwin const struct sockaddr *sa, void *_sc) 1916*744bfb21SJohn Baldwin { 1917*744bfb21SJohn Baldwin #ifdef INET 1918*744bfb21SJohn Baldwin const struct sockaddr_in *sin; 1919*744bfb21SJohn Baldwin #endif 1920*744bfb21SJohn Baldwin #ifdef INET6 1921*744bfb21SJohn Baldwin const struct sockaddr_in6 *sin6; 1922*744bfb21SJohn Baldwin #endif 1923*744bfb21SJohn Baldwin struct noise_remote *remote; 1924*744bfb21SJohn Baldwin struct wg_pkt_data *data; 1925*744bfb21SJohn Baldwin struct wg_packet *pkt; 1926*744bfb21SJohn Baldwin struct wg_peer *peer; 1927*744bfb21SJohn Baldwin struct wg_softc *sc = _sc; 1928*744bfb21SJohn Baldwin struct mbuf *defragged; 1929*744bfb21SJohn Baldwin 1930*744bfb21SJohn Baldwin defragged = m_defrag(m, M_NOWAIT); 1931*744bfb21SJohn Baldwin if (defragged) 1932*744bfb21SJohn Baldwin m = defragged; 1933*744bfb21SJohn Baldwin m = m_unshare(m, M_NOWAIT); 1934*744bfb21SJohn Baldwin if (!m) { 1935*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); 1936*744bfb21SJohn Baldwin return true; 1937*744bfb21SJohn Baldwin } 1938*744bfb21SJohn Baldwin 1939*744bfb21SJohn Baldwin /* Caller provided us with `sa`, no need for this header. */ 1940*744bfb21SJohn Baldwin m_adj(m, offset + sizeof(struct udphdr)); 1941*744bfb21SJohn Baldwin 1942*744bfb21SJohn Baldwin /* Pullup enough to read packet type */ 1943*744bfb21SJohn Baldwin if ((m = m_pullup(m, sizeof(uint32_t))) == NULL) { 1944*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); 1945*744bfb21SJohn Baldwin return true; 1946*744bfb21SJohn Baldwin } 1947*744bfb21SJohn Baldwin 1948*744bfb21SJohn Baldwin if ((pkt = wg_packet_alloc(m)) == NULL) { 1949*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); 1950*744bfb21SJohn Baldwin m_freem(m); 1951*744bfb21SJohn Baldwin return true; 1952*744bfb21SJohn Baldwin } 1953*744bfb21SJohn Baldwin 1954*744bfb21SJohn Baldwin /* Save send/recv address and port for later. 
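 * The indexing below assumes the caller hands in two consecutive sockaddrs:
 * element [0] is the remote (source) address of the datagram and element [1]
 * carries the local address it arrived on, which is how both e_remote and
 * e_local can be filled in from the single sa pointer.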
*/ 1955*744bfb21SJohn Baldwin switch (sa->sa_family) { 1956*744bfb21SJohn Baldwin #ifdef INET 1957*744bfb21SJohn Baldwin case AF_INET: 1958*744bfb21SJohn Baldwin sin = (const struct sockaddr_in *)sa; 1959*744bfb21SJohn Baldwin pkt->p_endpoint.e_remote.r_sin = sin[0]; 1960*744bfb21SJohn Baldwin pkt->p_endpoint.e_local.l_in = sin[1].sin_addr; 1961*744bfb21SJohn Baldwin break; 1962*744bfb21SJohn Baldwin #endif 1963*744bfb21SJohn Baldwin #ifdef INET6 1964*744bfb21SJohn Baldwin case AF_INET6: 1965*744bfb21SJohn Baldwin sin6 = (const struct sockaddr_in6 *)sa; 1966*744bfb21SJohn Baldwin pkt->p_endpoint.e_remote.r_sin6 = sin6[0]; 1967*744bfb21SJohn Baldwin pkt->p_endpoint.e_local.l_in6 = sin6[1].sin6_addr; 1968*744bfb21SJohn Baldwin break; 1969*744bfb21SJohn Baldwin #endif 1970*744bfb21SJohn Baldwin default: 1971*744bfb21SJohn Baldwin goto error; 1972*744bfb21SJohn Baldwin } 1973*744bfb21SJohn Baldwin 1974*744bfb21SJohn Baldwin if ((m->m_pkthdr.len == sizeof(struct wg_pkt_initiation) && 1975*744bfb21SJohn Baldwin *mtod(m, uint32_t *) == WG_PKT_INITIATION) || 1976*744bfb21SJohn Baldwin (m->m_pkthdr.len == sizeof(struct wg_pkt_response) && 1977*744bfb21SJohn Baldwin *mtod(m, uint32_t *) == WG_PKT_RESPONSE) || 1978*744bfb21SJohn Baldwin (m->m_pkthdr.len == sizeof(struct wg_pkt_cookie) && 1979*744bfb21SJohn Baldwin *mtod(m, uint32_t *) == WG_PKT_COOKIE)) { 1980*744bfb21SJohn Baldwin 1981*744bfb21SJohn Baldwin if (wg_queue_enqueue_handshake(&sc->sc_handshake_queue, pkt) != 0) { 1982*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); 1983*744bfb21SJohn Baldwin DPRINTF(sc, "Dropping handshake packet\n"); 1984*744bfb21SJohn Baldwin } 1985*744bfb21SJohn Baldwin GROUPTASK_ENQUEUE(&sc->sc_handshake); 1986*744bfb21SJohn Baldwin } else if (m->m_pkthdr.len >= sizeof(struct wg_pkt_data) + 1987*744bfb21SJohn Baldwin NOISE_AUTHTAG_LEN && *mtod(m, uint32_t *) == WG_PKT_DATA) { 1988*744bfb21SJohn Baldwin 1989*744bfb21SJohn Baldwin /* Pullup whole header to read r_idx below. 
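 * r_idx is the receiver-side index handed out during the handshake;
 * noise_keypair_lookup() maps it to a keypair, the keypair to its peer, and
 * the packet is then pushed through wg_queue_both() onto that peer's
 * decrypt queues.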
*/ 1990*744bfb21SJohn Baldwin if ((pkt->p_mbuf = m_pullup(m, sizeof(struct wg_pkt_data))) == NULL) 1991*744bfb21SJohn Baldwin goto error; 1992*744bfb21SJohn Baldwin 1993*744bfb21SJohn Baldwin data = mtod(pkt->p_mbuf, struct wg_pkt_data *); 1994*744bfb21SJohn Baldwin if ((pkt->p_keypair = noise_keypair_lookup(sc->sc_local, data->r_idx)) == NULL) 1995*744bfb21SJohn Baldwin goto error; 1996*744bfb21SJohn Baldwin 1997*744bfb21SJohn Baldwin remote = noise_keypair_remote(pkt->p_keypair); 1998*744bfb21SJohn Baldwin peer = noise_remote_arg(remote); 1999*744bfb21SJohn Baldwin if (wg_queue_both(&sc->sc_decrypt_parallel, &peer->p_decrypt_serial, pkt) != 0) 2000*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1); 2001*744bfb21SJohn Baldwin wg_decrypt_dispatch(sc); 2002*744bfb21SJohn Baldwin noise_remote_put(remote); 2003*744bfb21SJohn Baldwin } else { 2004*744bfb21SJohn Baldwin goto error; 2005*744bfb21SJohn Baldwin } 2006*744bfb21SJohn Baldwin return true; 2007*744bfb21SJohn Baldwin error: 2008*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 2009*744bfb21SJohn Baldwin wg_packet_free(pkt); 2010*744bfb21SJohn Baldwin return true; 2011*744bfb21SJohn Baldwin } 2012*744bfb21SJohn Baldwin 2013*744bfb21SJohn Baldwin static void 2014*744bfb21SJohn Baldwin wg_peer_send_staged(struct wg_peer *peer) 2015*744bfb21SJohn Baldwin { 2016*744bfb21SJohn Baldwin struct wg_packet_list list; 2017*744bfb21SJohn Baldwin struct noise_keypair *keypair; 2018*744bfb21SJohn Baldwin struct wg_packet *pkt, *tpkt; 2019*744bfb21SJohn Baldwin struct wg_softc *sc = peer->p_sc; 2020*744bfb21SJohn Baldwin 2021*744bfb21SJohn Baldwin wg_queue_delist_staged(&peer->p_stage_queue, &list); 2022*744bfb21SJohn Baldwin 2023*744bfb21SJohn Baldwin if (STAILQ_EMPTY(&list)) 2024*744bfb21SJohn Baldwin return; 2025*744bfb21SJohn Baldwin 2026*744bfb21SJohn Baldwin if ((keypair = noise_keypair_current(peer->p_remote)) == NULL) 2027*744bfb21SJohn Baldwin goto error; 2028*744bfb21SJohn Baldwin 2029*744bfb21SJohn Baldwin STAILQ_FOREACH(pkt, &list, p_parallel) { 2030*744bfb21SJohn Baldwin if (noise_keypair_nonce_next(keypair, &pkt->p_nonce) != 0) 2031*744bfb21SJohn Baldwin goto error_keypair; 2032*744bfb21SJohn Baldwin } 2033*744bfb21SJohn Baldwin STAILQ_FOREACH_SAFE(pkt, &list, p_parallel, tpkt) { 2034*744bfb21SJohn Baldwin pkt->p_keypair = noise_keypair_ref(keypair); 2035*744bfb21SJohn Baldwin if (wg_queue_both(&sc->sc_encrypt_parallel, &peer->p_encrypt_serial, pkt) != 0) 2036*744bfb21SJohn Baldwin if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); 2037*744bfb21SJohn Baldwin } 2038*744bfb21SJohn Baldwin wg_encrypt_dispatch(sc); 2039*744bfb21SJohn Baldwin noise_keypair_put(keypair); 2040*744bfb21SJohn Baldwin return; 2041*744bfb21SJohn Baldwin 2042*744bfb21SJohn Baldwin error_keypair: 2043*744bfb21SJohn Baldwin noise_keypair_put(keypair); 2044*744bfb21SJohn Baldwin error: 2045*744bfb21SJohn Baldwin wg_queue_enlist_staged(&peer->p_stage_queue, &list); 2046*744bfb21SJohn Baldwin wg_timers_event_want_initiation(peer); 2047*744bfb21SJohn Baldwin } 2048*744bfb21SJohn Baldwin 2049*744bfb21SJohn Baldwin static inline void 2050*744bfb21SJohn Baldwin xmit_err(struct ifnet *ifp, struct mbuf *m, struct wg_packet *pkt, sa_family_t af) 2051*744bfb21SJohn Baldwin { 2052*744bfb21SJohn Baldwin if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2053*744bfb21SJohn Baldwin switch (af) { 2054*744bfb21SJohn Baldwin #ifdef INET 2055*744bfb21SJohn Baldwin case AF_INET: 2056*744bfb21SJohn Baldwin icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 
0, 0); 2057*744bfb21SJohn Baldwin if (pkt) 2058*744bfb21SJohn Baldwin pkt->p_mbuf = NULL; 2059*744bfb21SJohn Baldwin m = NULL; 2060*744bfb21SJohn Baldwin break; 2061*744bfb21SJohn Baldwin #endif 2062*744bfb21SJohn Baldwin #ifdef INET6 2063*744bfb21SJohn Baldwin case AF_INET6: 2064*744bfb21SJohn Baldwin icmp6_error(m, ICMP6_DST_UNREACH, 0, 0); 2065*744bfb21SJohn Baldwin if (pkt) 2066*744bfb21SJohn Baldwin pkt->p_mbuf = NULL; 2067*744bfb21SJohn Baldwin m = NULL; 2068*744bfb21SJohn Baldwin break; 2069*744bfb21SJohn Baldwin #endif 2070*744bfb21SJohn Baldwin } 2071*744bfb21SJohn Baldwin if (pkt) 2072*744bfb21SJohn Baldwin wg_packet_free(pkt); 2073*744bfb21SJohn Baldwin else if (m) 2074*744bfb21SJohn Baldwin m_freem(m); 2075*744bfb21SJohn Baldwin } 2076*744bfb21SJohn Baldwin 2077*744bfb21SJohn Baldwin static int 2078*744bfb21SJohn Baldwin wg_xmit(struct ifnet *ifp, struct mbuf *m, sa_family_t af, uint32_t mtu) 2079*744bfb21SJohn Baldwin { 2080*744bfb21SJohn Baldwin struct wg_packet *pkt = NULL; 2081*744bfb21SJohn Baldwin struct wg_softc *sc = ifp->if_softc; 2082*744bfb21SJohn Baldwin struct wg_peer *peer; 2083*744bfb21SJohn Baldwin int rc = 0; 2084*744bfb21SJohn Baldwin sa_family_t peer_af; 2085*744bfb21SJohn Baldwin 2086*744bfb21SJohn Baldwin /* Work around lifetime issue in the ipv6 mld code. */ 2087*744bfb21SJohn Baldwin if (__predict_false((ifp->if_flags & IFF_DYING) || !sc)) { 2088*744bfb21SJohn Baldwin rc = ENXIO; 2089*744bfb21SJohn Baldwin goto err_xmit; 2090*744bfb21SJohn Baldwin } 2091*744bfb21SJohn Baldwin 2092*744bfb21SJohn Baldwin if ((pkt = wg_packet_alloc(m)) == NULL) { 2093*744bfb21SJohn Baldwin rc = ENOBUFS; 2094*744bfb21SJohn Baldwin goto err_xmit; 2095*744bfb21SJohn Baldwin } 2096*744bfb21SJohn Baldwin pkt->p_mtu = mtu; 2097*744bfb21SJohn Baldwin pkt->p_af = af; 2098*744bfb21SJohn Baldwin 2099*744bfb21SJohn Baldwin if (af == AF_INET) { 2100*744bfb21SJohn Baldwin peer = wg_aip_lookup(sc, AF_INET, &mtod(m, struct ip *)->ip_dst); 2101*744bfb21SJohn Baldwin } else if (af == AF_INET6) { 2102*744bfb21SJohn Baldwin peer = wg_aip_lookup(sc, AF_INET6, &mtod(m, struct ip6_hdr *)->ip6_dst); 2103*744bfb21SJohn Baldwin } else { 2104*744bfb21SJohn Baldwin rc = EAFNOSUPPORT; 2105*744bfb21SJohn Baldwin goto err_xmit; 2106*744bfb21SJohn Baldwin } 2107*744bfb21SJohn Baldwin 2108*744bfb21SJohn Baldwin BPF_MTAP2_AF(ifp, m, pkt->p_af); 2109*744bfb21SJohn Baldwin 2110*744bfb21SJohn Baldwin if (__predict_false(peer == NULL)) { 2111*744bfb21SJohn Baldwin rc = ENOKEY; 2112*744bfb21SJohn Baldwin goto err_xmit; 2113*744bfb21SJohn Baldwin } 2114*744bfb21SJohn Baldwin 2115*744bfb21SJohn Baldwin if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_WGLOOP, MAX_LOOPS))) { 2116*744bfb21SJohn Baldwin DPRINTF(sc, "Packet looped"); 2117*744bfb21SJohn Baldwin rc = ELOOP; 2118*744bfb21SJohn Baldwin goto err_peer; 2119*744bfb21SJohn Baldwin } 2120*744bfb21SJohn Baldwin 2121*744bfb21SJohn Baldwin peer_af = peer->p_endpoint.e_remote.r_sa.sa_family; 2122*744bfb21SJohn Baldwin if (__predict_false(peer_af != AF_INET && peer_af != AF_INET6)) { 2123*744bfb21SJohn Baldwin DPRINTF(sc, "No valid endpoint has been configured or " 2124*744bfb21SJohn Baldwin "discovered for peer %" PRIu64 "\n", peer->p_id); 2125*744bfb21SJohn Baldwin rc = EHOSTUNREACH; 2126*744bfb21SJohn Baldwin goto err_peer; 2127*744bfb21SJohn Baldwin } 2128*744bfb21SJohn Baldwin 2129*744bfb21SJohn Baldwin wg_queue_push_staged(&peer->p_stage_queue, pkt); 2130*744bfb21SJohn Baldwin wg_peer_send_staged(peer); 2131*744bfb21SJohn Baldwin 
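	/*
	 * wg_aip_lookup() returned the peer with a reference held on its
	 * noise_remote; drop it here on success, or via err_peer on any
	 * failure taken after the lookup.
	 */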
noise_remote_put(peer->p_remote); 2132*744bfb21SJohn Baldwin return (0); 2133*744bfb21SJohn Baldwin 2134*744bfb21SJohn Baldwin err_peer: 2135*744bfb21SJohn Baldwin noise_remote_put(peer->p_remote); 2136*744bfb21SJohn Baldwin err_xmit: 2137*744bfb21SJohn Baldwin xmit_err(ifp, m, pkt, af); 2138*744bfb21SJohn Baldwin return (rc); 2139*744bfb21SJohn Baldwin } 2140*744bfb21SJohn Baldwin 2141*744bfb21SJohn Baldwin static inline int 2142*744bfb21SJohn Baldwin determine_af_and_pullup(struct mbuf **m, sa_family_t *af) 2143*744bfb21SJohn Baldwin { 2144*744bfb21SJohn Baldwin u_char ipv; 2145*744bfb21SJohn Baldwin if ((*m)->m_pkthdr.len >= sizeof(struct ip6_hdr)) 2146*744bfb21SJohn Baldwin *m = m_pullup(*m, sizeof(struct ip6_hdr)); 2147*744bfb21SJohn Baldwin else if ((*m)->m_pkthdr.len >= sizeof(struct ip)) 2148*744bfb21SJohn Baldwin *m = m_pullup(*m, sizeof(struct ip)); 2149*744bfb21SJohn Baldwin else 2150*744bfb21SJohn Baldwin return (EAFNOSUPPORT); 2151*744bfb21SJohn Baldwin if (*m == NULL) 2152*744bfb21SJohn Baldwin return (ENOBUFS); 2153*744bfb21SJohn Baldwin ipv = mtod(*m, struct ip *)->ip_v; 2154*744bfb21SJohn Baldwin if (ipv == 4) 2155*744bfb21SJohn Baldwin *af = AF_INET; 2156*744bfb21SJohn Baldwin else if (ipv == 6 && (*m)->m_pkthdr.len >= sizeof(struct ip6_hdr)) 2157*744bfb21SJohn Baldwin *af = AF_INET6; 2158*744bfb21SJohn Baldwin else 2159*744bfb21SJohn Baldwin return (EAFNOSUPPORT); 2160*744bfb21SJohn Baldwin return (0); 2161*744bfb21SJohn Baldwin } 2162*744bfb21SJohn Baldwin 2163*744bfb21SJohn Baldwin static int 2164*744bfb21SJohn Baldwin wg_transmit(struct ifnet *ifp, struct mbuf *m) 2165*744bfb21SJohn Baldwin { 2166*744bfb21SJohn Baldwin sa_family_t af; 2167*744bfb21SJohn Baldwin int ret; 2168*744bfb21SJohn Baldwin struct mbuf *defragged; 2169*744bfb21SJohn Baldwin 2170*744bfb21SJohn Baldwin defragged = m_defrag(m, M_NOWAIT); 2171*744bfb21SJohn Baldwin if (defragged) 2172*744bfb21SJohn Baldwin m = defragged; 2173*744bfb21SJohn Baldwin m = m_unshare(m, M_NOWAIT); 2174*744bfb21SJohn Baldwin if (!m) { 2175*744bfb21SJohn Baldwin xmit_err(ifp, m, NULL, AF_UNSPEC); 2176*744bfb21SJohn Baldwin return (ENOBUFS); 2177*744bfb21SJohn Baldwin } 2178*744bfb21SJohn Baldwin 2179*744bfb21SJohn Baldwin ret = determine_af_and_pullup(&m, &af); 2180*744bfb21SJohn Baldwin if (ret) { 2181*744bfb21SJohn Baldwin xmit_err(ifp, m, NULL, AF_UNSPEC); 2182*744bfb21SJohn Baldwin return (ret); 2183*744bfb21SJohn Baldwin } 2184*744bfb21SJohn Baldwin return (wg_xmit(ifp, m, af, ifp->if_mtu)); 2185*744bfb21SJohn Baldwin } 2186*744bfb21SJohn Baldwin 2187*744bfb21SJohn Baldwin static int 2188*744bfb21SJohn Baldwin wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) 2189*744bfb21SJohn Baldwin { 2190*744bfb21SJohn Baldwin sa_family_t parsed_af; 2191*744bfb21SJohn Baldwin uint32_t af, mtu; 2192*744bfb21SJohn Baldwin int ret; 2193*744bfb21SJohn Baldwin struct mbuf *defragged; 2194*744bfb21SJohn Baldwin 2195*744bfb21SJohn Baldwin if (dst->sa_family == AF_UNSPEC) 2196*744bfb21SJohn Baldwin memcpy(&af, dst->sa_data, sizeof(af)); 2197*744bfb21SJohn Baldwin else 2198*744bfb21SJohn Baldwin af = dst->sa_family; 2199*744bfb21SJohn Baldwin if (af == AF_UNSPEC) { 2200*744bfb21SJohn Baldwin xmit_err(ifp, m, NULL, af); 2201*744bfb21SJohn Baldwin return (EAFNOSUPPORT); 2202*744bfb21SJohn Baldwin } 2203*744bfb21SJohn Baldwin 2204*744bfb21SJohn Baldwin defragged = m_defrag(m, M_NOWAIT); 2205*744bfb21SJohn Baldwin if (defragged) 2206*744bfb21SJohn Baldwin m = defragged; 2207*744bfb21SJohn Baldwin m = 
static int
wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro)
{
	sa_family_t parsed_af;
	uint32_t af, mtu;
	int ret;
	struct mbuf *defragged;

	if (dst->sa_family == AF_UNSPEC)
		memcpy(&af, dst->sa_data, sizeof(af));
	else
		af = dst->sa_family;
	if (af == AF_UNSPEC) {
		xmit_err(ifp, m, NULL, af);
		return (EAFNOSUPPORT);
	}

	defragged = m_defrag(m, M_NOWAIT);
	if (defragged)
		m = defragged;
	m = m_unshare(m, M_NOWAIT);
	if (!m) {
		xmit_err(ifp, m, NULL, AF_UNSPEC);
		return (ENOBUFS);
	}

	ret = determine_af_and_pullup(&m, &parsed_af);
	if (ret) {
		xmit_err(ifp, m, NULL, AF_UNSPEC);
		return (ret);
	}
	if (parsed_af != af) {
		xmit_err(ifp, m, NULL, AF_UNSPEC);
		return (EAFNOSUPPORT);
	}
	mtu = (ro != NULL && ro->ro_mtu > 0) ? ro->ro_mtu : ifp->if_mtu;
	return (wg_xmit(ifp, m, parsed_af, mtu));
}
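
/*
 * Create or update a single peer from its nvlist description.  "public-key"
 * is mandatory; "remove", "replace-allowedips", "endpoint", "preshared-key",
 * "persistent-keepalive-interval" and "allowed-ips" are applied when present.
 * Called from wgc_set() with the softc lock held exclusively.
 */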
static int
wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl)
{
	uint8_t public[WG_KEY_SIZE];
	const void *pub_key, *preshared_key = NULL;
	const struct sockaddr *endpoint;
	int err;
	size_t size;
	struct noise_remote *remote;
	struct wg_peer *peer = NULL;
	bool need_insert = false;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (!nvlist_exists_binary(nvl, "public-key")) {
		return (EINVAL);
	}
	pub_key = nvlist_get_binary(nvl, "public-key", &size);
	if (size != WG_KEY_SIZE) {
		return (EINVAL);
	}
	if (noise_local_keys(sc->sc_local, public, NULL) == 0 &&
	    bcmp(public, pub_key, WG_KEY_SIZE) == 0) {
		return (0); /* Silently ignored; not actually a failure. */
	}
	if ((remote = noise_remote_lookup(sc->sc_local, pub_key)) != NULL)
		peer = noise_remote_arg(remote);
	if (nvlist_exists_bool(nvl, "remove") &&
	    nvlist_get_bool(nvl, "remove")) {
		if (remote != NULL) {
			wg_peer_destroy(peer);
			noise_remote_put(remote);
		}
		return (0);
	}
	if (nvlist_exists_bool(nvl, "replace-allowedips") &&
	    nvlist_get_bool(nvl, "replace-allowedips") &&
	    peer != NULL) {
		wg_aip_remove_all(sc, peer);
	}
	if (peer == NULL) {
		peer = wg_peer_alloc(sc, pub_key);
		need_insert = true;
	}
	if (nvlist_exists_binary(nvl, "endpoint")) {
		endpoint = nvlist_get_binary(nvl, "endpoint", &size);
		if (size > sizeof(peer->p_endpoint.e_remote)) {
			err = EINVAL;
			goto out;
		}
		memcpy(&peer->p_endpoint.e_remote, endpoint, size);
	}
	if (nvlist_exists_binary(nvl, "preshared-key")) {
		preshared_key = nvlist_get_binary(nvl, "preshared-key", &size);
		if (size != WG_KEY_SIZE) {
			err = EINVAL;
			goto out;
		}
		noise_remote_set_psk(peer->p_remote, preshared_key);
	}
	if (nvlist_exists_number(nvl, "persistent-keepalive-interval")) {
		uint64_t pki = nvlist_get_number(nvl, "persistent-keepalive-interval");
		if (pki > UINT16_MAX) {
			err = EINVAL;
			goto out;
		}
		wg_timers_set_persistent_keepalive(peer, pki);
	}
	if (nvlist_exists_nvlist_array(nvl, "allowed-ips")) {
		const void *addr;
		uint64_t cidr;
		const nvlist_t * const *aipl;
		size_t allowedip_count;

		aipl = nvlist_get_nvlist_array(nvl, "allowed-ips", &allowedip_count);
		for (size_t idx = 0; idx < allowedip_count; idx++) {
			if (!nvlist_exists_number(aipl[idx], "cidr"))
				continue;
			cidr = nvlist_get_number(aipl[idx], "cidr");
			if (nvlist_exists_binary(aipl[idx], "ipv4")) {
				addr = nvlist_get_binary(aipl[idx], "ipv4", &size);
				if (addr == NULL || cidr > 32 || size != sizeof(struct in_addr)) {
					err = EINVAL;
					goto out;
				}
				if ((err = wg_aip_add(sc, peer, AF_INET, addr, cidr)) != 0)
					goto out;
			} else if (nvlist_exists_binary(aipl[idx], "ipv6")) {
				addr = nvlist_get_binary(aipl[idx], "ipv6", &size);
				if (addr == NULL || cidr > 128 || size != sizeof(struct in6_addr)) {
					err = EINVAL;
					goto out;
				}
				if ((err = wg_aip_add(sc, peer, AF_INET6, addr, cidr)) != 0)
					goto out;
			} else {
				continue;
			}
		}
	}
	if (need_insert) {
		if ((err = noise_remote_enable(peer->p_remote)) != 0)
			goto out;
		TAILQ_INSERT_TAIL(&sc->sc_peers, peer, p_entry);
		sc->sc_peers_num++;
		if (sc->sc_ifp->if_link_state == LINK_STATE_UP)
			wg_timers_enable(peer);
	}
	if (remote != NULL)
		noise_remote_put(remote);
	return (0);
out:
	if (need_insert) /* If we fail, only destroy if it was new. */
		wg_peer_destroy(peer);
	if (remote != NULL)
		noise_remote_put(remote);
	return (err);
}
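
/*
 * SIOCSWG handler: copy the packed nvlist in from userspace, apply the
 * interface-wide settings (replace-peers, listen-port, private-key,
 * user-cookie), then add or update each entry under "peers" through
 * wg_peer_add().  The packed buffer may hold key material, so it is zeroed
 * before being freed.
 */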
static int
wgc_set(struct wg_softc *sc, struct wg_data_io *wgd)
{
	uint8_t public[WG_KEY_SIZE], private[WG_KEY_SIZE];
	struct ifnet *ifp;
	void *nvlpacked;
	nvlist_t *nvl;
	ssize_t size;
	int err;

	ifp = sc->sc_ifp;
	if (wgd->wgd_size == 0 || wgd->wgd_data == NULL)
		return (EFAULT);

	/*
	 * Can nvlists be streamed in?  It's not nice to impose arbitrary
	 * limits like that but there needs to be _some_ limitation.
	 */
	if (wgd->wgd_size >= UINT32_MAX / 2)
		return (E2BIG);

	nvlpacked = malloc(wgd->wgd_size, M_TEMP, M_WAITOK | M_ZERO);

	err = copyin(wgd->wgd_data, nvlpacked, wgd->wgd_size);
	if (err)
		goto out;
	nvl = nvlist_unpack(nvlpacked, wgd->wgd_size, 0);
	if (nvl == NULL) {
		err = EBADMSG;
		goto out;
	}
	sx_xlock(&sc->sc_lock);
	if (nvlist_exists_bool(nvl, "replace-peers") &&
	    nvlist_get_bool(nvl, "replace-peers"))
		wg_peer_destroy_all(sc);
	if (nvlist_exists_number(nvl, "listen-port")) {
		uint64_t new_port = nvlist_get_number(nvl, "listen-port");
		if (new_port > UINT16_MAX) {
			err = EINVAL;
			goto out_locked;
		}
		if (new_port != sc->sc_socket.so_port) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((err = wg_socket_init(sc, new_port)) != 0)
					goto out_locked;
			} else
				sc->sc_socket.so_port = new_port;
		}
	}
	if (nvlist_exists_binary(nvl, "private-key")) {
		const void *key = nvlist_get_binary(nvl, "private-key", &size);
		if (size != WG_KEY_SIZE) {
			err = EINVAL;
			goto out_locked;
		}

		if (noise_local_keys(sc->sc_local, NULL, private) != 0 ||
		    timingsafe_bcmp(private, key, WG_KEY_SIZE) != 0) {
			struct wg_peer *peer;

			if (curve25519_generate_public(public, key)) {
				/* Peer conflict: remove conflicting peer. */
				struct noise_remote *remote;
				if ((remote = noise_remote_lookup(sc->sc_local,
				    public)) != NULL) {
					peer = noise_remote_arg(remote);
					wg_peer_destroy(peer);
					noise_remote_put(remote);
				}
			}

			/*
			 * Set the private key and invalidate all existing
			 * handshakes.
			 */
			/* Note: we might be removing the private key. */
			noise_local_private(sc->sc_local, key);
			if (noise_local_keys(sc->sc_local, NULL, NULL) == 0)
				cookie_checker_update(&sc->sc_cookie, public);
			else
				cookie_checker_update(&sc->sc_cookie, NULL);
		}
	}
	if (nvlist_exists_number(nvl, "user-cookie")) {
		uint64_t user_cookie = nvlist_get_number(nvl, "user-cookie");
		if (user_cookie > UINT32_MAX) {
			err = EINVAL;
			goto out_locked;
		}
		err = wg_socket_set_cookie(sc, user_cookie);
		if (err)
			goto out_locked;
	}
	if (nvlist_exists_nvlist_array(nvl, "peers")) {
		size_t peercount;
		const nvlist_t * const *nvl_peers;

		nvl_peers = nvlist_get_nvlist_array(nvl, "peers", &peercount);
		for (size_t i = 0; i < peercount; i++) {
			err = wg_peer_add(sc, nvl_peers[i]);
			if (err != 0)
				goto out_locked;
		}
	}

out_locked:
	sx_xunlock(&sc->sc_lock);
	nvlist_destroy(nvl);
out:
	explicit_bzero(nvlpacked, wgd->wgd_size);
	free(nvlpacked, M_TEMP);
	return (err);
}
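
/*
 * SIOCGWG handler: serialize the current configuration into a packed nvlist
 * and copy it out to userspace.  Secrets (private-key, preshared-key) are
 * only included for privileged callers; a caller passing wgd_size == 0 just
 * gets the required buffer size back.
 */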
static int
wgc_get(struct wg_softc *sc, struct wg_data_io *wgd)
{
	uint8_t public_key[WG_KEY_SIZE] = { 0 };
	uint8_t private_key[WG_KEY_SIZE] = { 0 };
	uint8_t preshared_key[NOISE_SYMMETRIC_KEY_LEN] = { 0 };
	nvlist_t *nvl, *nvl_peer, *nvl_aip, **nvl_peers, **nvl_aips;
	size_t size, peer_count, aip_count, i, j;
	struct wg_timespec64 ts64;
	struct wg_peer *peer;
	struct wg_aip *aip;
	void *packed;
	int err = 0;

	nvl = nvlist_create(0);
	if (!nvl)
		return (ENOMEM);

	sx_slock(&sc->sc_lock);

	if (sc->sc_socket.so_port != 0)
		nvlist_add_number(nvl, "listen-port", sc->sc_socket.so_port);
	if (sc->sc_socket.so_user_cookie != 0)
		nvlist_add_number(nvl, "user-cookie", sc->sc_socket.so_user_cookie);
	if (noise_local_keys(sc->sc_local, public_key, private_key) == 0) {
		nvlist_add_binary(nvl, "public-key", public_key, WG_KEY_SIZE);
		if (wgc_privileged(sc))
			nvlist_add_binary(nvl, "private-key", private_key, WG_KEY_SIZE);
		explicit_bzero(private_key, sizeof(private_key));
	}
	peer_count = sc->sc_peers_num;
	if (peer_count) {
		nvl_peers = mallocarray(peer_count, sizeof(void *), M_NVLIST, M_WAITOK | M_ZERO);
		i = 0;
		TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
			if (i >= peer_count)
				panic("peers changed from under us");

			nvl_peers[i++] = nvl_peer = nvlist_create(0);
			if (!nvl_peer) {
				err = ENOMEM;
				goto err_peer;
			}

			(void)noise_remote_keys(peer->p_remote, public_key, preshared_key);
			nvlist_add_binary(nvl_peer, "public-key", public_key, sizeof(public_key));
			if (wgc_privileged(sc))
				nvlist_add_binary(nvl_peer, "preshared-key", preshared_key, sizeof(preshared_key));
			explicit_bzero(preshared_key, sizeof(preshared_key));
			if (peer->p_endpoint.e_remote.r_sa.sa_family == AF_INET)
				nvlist_add_binary(nvl_peer, "endpoint", &peer->p_endpoint.e_remote, sizeof(struct sockaddr_in));
			else if (peer->p_endpoint.e_remote.r_sa.sa_family == AF_INET6)
				nvlist_add_binary(nvl_peer, "endpoint", &peer->p_endpoint.e_remote, sizeof(struct sockaddr_in6));
			wg_timers_get_last_handshake(peer, &ts64);
			nvlist_add_binary(nvl_peer, "last-handshake-time", &ts64, sizeof(ts64));
			nvlist_add_number(nvl_peer, "persistent-keepalive-interval", peer->p_persistent_keepalive_interval);
			nvlist_add_number(nvl_peer, "rx-bytes", counter_u64_fetch(peer->p_rx_bytes));
			nvlist_add_number(nvl_peer, "tx-bytes", counter_u64_fetch(peer->p_tx_bytes));
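
			/*
			 * Serialize this peer's allowed IPs; the prefix
			 * length is recovered from the stored radix mask.
			 */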
			aip_count = peer->p_aips_num;
			if (aip_count) {
				nvl_aips = mallocarray(aip_count, sizeof(void *), M_NVLIST, M_WAITOK | M_ZERO);
				j = 0;
				LIST_FOREACH(aip, &peer->p_aips, a_entry) {
					if (j >= aip_count)
						panic("aips changed from under us");

					nvl_aips[j++] = nvl_aip = nvlist_create(0);
					if (!nvl_aip) {
						err = ENOMEM;
						goto err_aip;
					}
					if (aip->a_af == AF_INET) {
						nvlist_add_binary(nvl_aip, "ipv4", &aip->a_addr.in, sizeof(aip->a_addr.in));
						nvlist_add_number(nvl_aip, "cidr", bitcount32(aip->a_mask.ip));
					}
#ifdef INET6
					else if (aip->a_af == AF_INET6) {
						nvlist_add_binary(nvl_aip, "ipv6", &aip->a_addr.in6, sizeof(aip->a_addr.in6));
						nvlist_add_number(nvl_aip, "cidr", in6_mask2len(&aip->a_mask.in6, NULL));
					}
#endif
				}
				nvlist_add_nvlist_array(nvl_peer, "allowed-ips", (const nvlist_t * const *)nvl_aips, aip_count);
			err_aip:
				for (j = 0; j < aip_count; ++j)
					nvlist_destroy(nvl_aips[j]);
				free(nvl_aips, M_NVLIST);
				if (err)
					goto err_peer;
			}
		}
		nvlist_add_nvlist_array(nvl, "peers", (const nvlist_t * const *)nvl_peers, peer_count);
	err_peer:
		for (i = 0; i < peer_count; ++i)
			nvlist_destroy(nvl_peers[i]);
		free(nvl_peers, M_NVLIST);
		if (err) {
			sx_sunlock(&sc->sc_lock);
			goto err;
		}
	}
	sx_sunlock(&sc->sc_lock);
	packed = nvlist_pack(nvl, &size);
	if (!packed) {
		err = ENOMEM;
		goto err;
	}
	if (!wgd->wgd_size) {
		wgd->wgd_size = size;
		goto out;
	}
	if (wgd->wgd_size < size) {
		err = ENOSPC;
		goto out;
	}
	err = copyout(packed, wgd->wgd_data, size);
	wgd->wgd_size = size;

out:
	explicit_bzero(packed, size);
	free(packed, M_NVLIST);
err:
	nvlist_destroy(nvl);
	return (err);
}
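
/*
 * Interface ioctl entry point.  SIOCSWG requires PRIV_NET_WG; SIOCGWG is
 * unprivileged, but wgc_get() withholds secrets from unprivileged callers.
 * The rest are the usual ifnet ioctls plus the tunnel FIB get/set pair.
 */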
static int
wg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wg_data_io *wgd = (struct wg_data_io *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct wg_softc *sc;
	int ret = 0;

	sx_slock(&wg_sx);
	sc = ifp->if_softc;
	if (!sc) {
		ret = ENXIO;
		goto out;
	}

	switch (cmd) {
	case SIOCSWG:
		ret = priv_check(curthread, PRIV_NET_WG);
		if (ret == 0)
			ret = wgc_set(sc, wgd);
		break;
	case SIOCGWG:
		ret = wgc_get(sc, wgd);
		break;
	/* Interface IOCTLs */
	case SIOCSIFADDR:
		/*
		 * This differs from *BSD norms, but is more uniform with how
		 * WireGuard behaves elsewhere.
		 */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			ret = wg_up(sc);
		else
			wg_down(sc);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu <= 0 || ifr->ifr_mtu > MAX_MTU)
			ret = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCGTUNFIB:
		ifr->ifr_fib = sc->sc_socket.so_fibnum;
		break;
	case SIOCSTUNFIB:
		ret = priv_check(curthread, PRIV_NET_WG);
		if (ret)
			break;
		ret = priv_check(curthread, PRIV_NET_SETIFFIB);
		if (ret)
			break;
		sx_xlock(&sc->sc_lock);
		ret = wg_socket_set_fibnum(sc, ifr->ifr_fib);
		sx_xunlock(&sc->sc_lock);
		break;
	default:
		ret = ENOTTY;
	}

out:
	sx_sunlock(&wg_sx);
	return (ret);
}
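
/*
 * Bring the interface up: bind the UDP sockets to the configured port,
 * enable every peer's timers, and report the link as up.  Refused with
 * EBUSY once the softc has been marked WGF_DYING during jail teardown;
 * silently succeeds if the interface is already running.
 */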
static int
wg_up(struct wg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct wg_peer *peer;
	int rc = EBUSY;

	sx_xlock(&sc->sc_lock);
	/* Jail's being removed, no more wg_up(). */
	if ((sc->sc_flags & WGF_DYING) != 0)
		goto out;

	/* Silent success if we're already running. */
	rc = 0;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto out;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	rc = wg_socket_init(sc, sc->sc_socket.so_port);
	if (rc == 0) {
		TAILQ_FOREACH(peer, &sc->sc_peers, p_entry)
			wg_timers_enable(peer);
		if_link_state_change(sc->sc_ifp, LINK_STATE_UP);
	} else {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		DPRINTF(sc, "Unable to initialize sockets: %d\n", rc);
	}
out:
	sx_xunlock(&sc->sc_lock);
	return (rc);
}

static void
wg_down(struct wg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct wg_peer *peer;

	sx_xlock(&sc->sc_lock);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		sx_xunlock(&sc->sc_lock);
		return;
	}
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
		wg_queue_purge(&peer->p_stage_queue);
		wg_timers_disable(peer);
	}

	wg_queue_purge(&sc->sc_handshake_queue);

	TAILQ_FOREACH(peer, &sc->sc_peers, p_entry) {
		noise_remote_handshake_clear(peer->p_remote);
		noise_remote_keypairs_clear(peer->p_remote);
	}

	if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN);
	wg_socket_uninit(sc);

	sx_xunlock(&sc->sc_lock);
}
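
/*
 * if_clone create handler: allocate the softc, the per-CPU encrypt/decrypt
 * grouptasks and the IPv4/IPv6 allowed-IP radix heads, then set up and
 * attach the ifnet.  Partially constructed state is unwound on failure.
 */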
static int
wg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct wg_softc *sc;
	struct ifnet *ifp;

	sc = malloc(sizeof(*sc), M_WG, M_WAITOK | M_ZERO);

	sc->sc_local = noise_local_alloc(sc);

	sc->sc_encrypt = mallocarray(sizeof(struct grouptask), mp_ncpus, M_WG, M_WAITOK | M_ZERO);

	sc->sc_decrypt = mallocarray(sizeof(struct grouptask), mp_ncpus, M_WG, M_WAITOK | M_ZERO);

	if (!rn_inithead((void **)&sc->sc_aip4, offsetof(struct aip_addr, in) * NBBY))
		goto free_decrypt;

	if (!rn_inithead((void **)&sc->sc_aip6, offsetof(struct aip_addr, in6) * NBBY))
		goto free_aip4;

	atomic_add_int(&clone_count, 1);
	ifp = sc->sc_ifp = if_alloc(IFT_WIREGUARD);

	sc->sc_ucred = crhold(curthread->td_ucred);
	sc->sc_socket.so_fibnum = curthread->td_proc->p_fibnum;
	sc->sc_socket.so_port = 0;

	TAILQ_INIT(&sc->sc_peers);
	sc->sc_peers_num = 0;

	cookie_checker_init(&sc->sc_cookie);

	RADIX_NODE_HEAD_LOCK_INIT(sc->sc_aip4);
	RADIX_NODE_HEAD_LOCK_INIT(sc->sc_aip6);

	GROUPTASK_INIT(&sc->sc_handshake, 0, (gtask_fn_t *)wg_softc_handshake_receive, sc);
	taskqgroup_attach(qgroup_wg_tqg, &sc->sc_handshake, sc, NULL, NULL, "wg tx initiation");
	wg_queue_init(&sc->sc_handshake_queue, "hsq");

	for (int i = 0; i < mp_ncpus; i++) {
		GROUPTASK_INIT(&sc->sc_encrypt[i], 0,
		    (gtask_fn_t *)wg_softc_encrypt, sc);
		taskqgroup_attach_cpu(qgroup_wg_tqg, &sc->sc_encrypt[i], sc, i, NULL, NULL, "wg encrypt");
		GROUPTASK_INIT(&sc->sc_decrypt[i], 0,
		    (gtask_fn_t *)wg_softc_decrypt, sc);
		taskqgroup_attach_cpu(qgroup_wg_tqg, &sc->sc_decrypt[i], sc, i, NULL, NULL, "wg decrypt");
	}

	wg_queue_init(&sc->sc_encrypt_parallel, "encp");
	wg_queue_init(&sc->sc_decrypt_parallel, "decp");

	sx_init(&sc->sc_lock, "wg softc lock");

	ifp->if_softc = sc;
	ifp->if_capabilities = ifp->if_capenable = WG_CAPS;
	if_initname(ifp, wgname, unit);

	if_setmtu(ifp, DEFAULT_MTU);
	ifp->if_flags = IFF_NOARP | IFF_MULTICAST;
	ifp->if_init = wg_init;
	ifp->if_reassign = wg_reassign;
	ifp->if_qflush = wg_qflush;
	ifp->if_transmit = wg_transmit;
	ifp->if_output = wg_output;
	ifp->if_ioctl = wg_ioctl;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(uint32_t));
#ifdef INET6
	ND_IFINFO(ifp)->flags &= ~ND6_IFF_AUTO_LINKLOCAL;
	ND_IFINFO(ifp)->flags |= ND6_IFF_NO_DAD;
#endif
	sx_xlock(&wg_sx);
	LIST_INSERT_HEAD(&wg_list, sc, sc_entry);
	sx_xunlock(&wg_sx);
	return (0);
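
/* Error unwind: release whatever was set up before the failure. */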
free_aip4:
	RADIX_NODE_HEAD_DESTROY(sc->sc_aip4);
	free(sc->sc_aip4, M_RTABLE);
free_decrypt:
	free(sc->sc_decrypt, M_WG);
	free(sc->sc_encrypt, M_WG);
	noise_local_free(sc->sc_local, NULL);
	free(sc, M_WG);
	return (ENOMEM);
}

static void
wg_clone_deferred_free(struct noise_local *l)
{
	struct wg_softc *sc = noise_local_arg(l);

	free(sc, M_WG);
	atomic_add_int(&clone_count, -1);
}
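
/*
 * if_clone destroy handler: unhook the interface, close its sockets, wait
 * out the network epoch and drain the taskqueue groups before tearing down
 * per-CPU and radix state.  The softc itself is released later, from
 * wg_clone_deferred_free(), once the last noise_local reference goes away.
 */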
static void
wg_clone_destroy(struct ifnet *ifp)
{
	struct wg_softc *sc = ifp->if_softc;
	struct ucred *cred;

	sx_xlock(&wg_sx);
	ifp->if_softc = NULL;
	sx_xlock(&sc->sc_lock);
	sc->sc_flags |= WGF_DYING;
	cred = sc->sc_ucred;
	sc->sc_ucred = NULL;
	sx_xunlock(&sc->sc_lock);
	LIST_REMOVE(sc, sc_entry);
	sx_xunlock(&wg_sx);

	if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN);
	CURVNET_SET(sc->sc_ifp->if_vnet);
	if_purgeaddrs(sc->sc_ifp);
	CURVNET_RESTORE();

	sx_xlock(&sc->sc_lock);
	wg_socket_uninit(sc);
	sx_xunlock(&sc->sc_lock);

	/*
	 * There is no guarantee that all traffic has passed until the epoch
	 * has elapsed with the socket closed.
	 */
	NET_EPOCH_WAIT();

	taskqgroup_drain_all(qgroup_wg_tqg);
	sx_xlock(&sc->sc_lock);
	wg_peer_destroy_all(sc);
	epoch_drain_callbacks(net_epoch_preempt);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
	taskqgroup_detach(qgroup_wg_tqg, &sc->sc_handshake);
	for (int i = 0; i < mp_ncpus; i++) {
		taskqgroup_detach(qgroup_wg_tqg, &sc->sc_encrypt[i]);
		taskqgroup_detach(qgroup_wg_tqg, &sc->sc_decrypt[i]);
	}
	free(sc->sc_encrypt, M_WG);
	free(sc->sc_decrypt, M_WG);
	wg_queue_deinit(&sc->sc_handshake_queue);
	wg_queue_deinit(&sc->sc_encrypt_parallel);
	wg_queue_deinit(&sc->sc_decrypt_parallel);

	RADIX_NODE_HEAD_DESTROY(sc->sc_aip4);
	RADIX_NODE_HEAD_DESTROY(sc->sc_aip6);
	rn_detachhead((void **)&sc->sc_aip4);
	rn_detachhead((void **)&sc->sc_aip6);

	cookie_checker_free(&sc->sc_cookie);

	if (cred != NULL)
		crfree(cred);
	if_detach(sc->sc_ifp);
	if_free(sc->sc_ifp);

	noise_local_free(sc->sc_local, wg_clone_deferred_free);
}

static void
wg_qflush(struct ifnet *ifp __unused)
{
}

/*
 * Privileged information (private-key, preshared-key) is only exported for
 * root and jailed root by default.
 */
static bool
wgc_privileged(struct wg_softc *sc)
{
	struct thread *td;

	td = curthread;
	return (priv_check(td, PRIV_NET_WG) == 0);
}

static void
wg_reassign(struct ifnet *ifp, struct vnet *new_vnet __unused,
    char *unused __unused)
{
	struct wg_softc *sc;

	sc = ifp->if_softc;
	wg_down(sc);
}

static void
wg_init(void *xsc)
{
	struct wg_softc *sc;

	sc = xsc;
	wg_up(sc);
}

static void
vnet_wg_init(const void *unused __unused)
{
	V_wg_cloner = if_clone_simple(wgname, wg_clone_create, wg_clone_destroy,
	    0);
}
VNET_SYSINIT(vnet_wg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_wg_init, NULL);

static void
vnet_wg_uninit(const void *unused __unused)
{
	if (V_wg_cloner)
		if_clone_detach(V_wg_cloner);
}
VNET_SYSUNINIT(vnet_wg_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_wg_uninit, NULL);
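
/* Jail OSD PR_METHOD_REMOVE handler, registered from wg_module_init(). */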
static int
wg_prison_remove(void *obj, void *data __unused)
{
	const struct prison *pr = obj;
	struct wg_softc *sc;

	/*
	 * Do a pass through all if_wg interfaces and release creds on any from
	 * the jail that are supposed to be going away. This will, in turn, let
	 * the jail die so that we don't end up with Schrödinger's jail.
	 */
	sx_slock(&wg_sx);
	LIST_FOREACH(sc, &wg_list, sc_entry) {
		sx_xlock(&sc->sc_lock);
		if (!(sc->sc_flags & WGF_DYING) && sc->sc_ucred && sc->sc_ucred->cr_prison == pr) {
			struct ucred *cred = sc->sc_ucred;
			DPRINTF(sc, "Creating jail is exiting\n");
			if_link_state_change(sc->sc_ifp, LINK_STATE_DOWN);
			wg_socket_uninit(sc);
			sc->sc_ucred = NULL;
			crfree(cred);
			sc->sc_flags |= WGF_DYING;
		}
		sx_xunlock(&sc->sc_lock);
	}
	sx_sunlock(&wg_sx);

	return (0);
}

#ifdef SELFTESTS
#include "selftest/allowedips.c"
static bool wg_run_selftests(void)
{
	bool ret = true;
	ret &= wg_allowedips_selftest();
	ret &= noise_counter_selftest();
	ret &= cookie_selftest();
	return ret;
}
#else
static inline bool wg_run_selftests(void) { return true; }
#endif

static int
wg_module_init(void)
{
	int ret = ENOMEM;

	osd_method_t methods[PR_MAXMETHOD] = {
		[PR_METHOD_REMOVE] = wg_prison_remove,
	};

	if ((wg_packet_zone = uma_zcreate("wg packet", sizeof(struct wg_packet),
	    NULL, NULL, NULL, NULL, 0, 0)) == NULL)
		goto free_none;
	ret = crypto_init();
	if (ret != 0)
		goto free_zone;
	if (cookie_init() != 0)
		goto free_crypto;

	wg_osd_jail_slot = osd_jail_register(NULL, methods);

	ret = ENOTRECOVERABLE;
	if (!wg_run_selftests())
		goto free_all;

	return (0);

free_all:
	osd_jail_deregister(wg_osd_jail_slot);
	cookie_deinit();
free_crypto:
	crypto_deinit();
free_zone:
	uma_zdestroy(wg_packet_zone);
free_none:
	return (ret);
}
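
/*
 * Module unload: detach the cloner in every vnet, wait for the network
 * epoch so that no clone operations are still in flight, then release the
 * jail OSD slot, the cookie and crypto state, and the packet zone.
 */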
static void
wg_module_deinit(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		struct if_clone *clone = VNET_VNET(vnet_iter, wg_cloner);
		if (clone) {
			if_clone_detach(clone);
			VNET_VNET(vnet_iter, wg_cloner) = NULL;
		}
	}
	VNET_LIST_RUNLOCK();
	NET_EPOCH_WAIT();
	MPASS(LIST_EMPTY(&wg_list));
	osd_jail_deregister(wg_osd_jail_slot);
	cookie_deinit();
	crypto_deinit();
	uma_zdestroy(wg_packet_zone);
}

static int
wg_module_event_handler(module_t mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		return (wg_module_init());
	case MOD_UNLOAD:
		wg_module_deinit();
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t wg_moduledata = {
	wgname,
	wg_module_event_handler,
	NULL
};

DECLARE_MODULE(wg, wg_moduledata, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(wg, WIREGUARD_VERSION);
MODULE_DEPEND(wg, crypto, 1, 1, 1);