// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

static atomic64_t peer_counter = ATOMIC64_INIT(0);

struct wg_peer *wg_peer_create(struct wg_device *wg,
                               const u8 public_key[NOISE_PUBLIC_KEY_LEN],
                               const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
        struct wg_peer *peer;
        int ret = -ENOMEM;

        lockdep_assert_held(&wg->device_update_lock);

        if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
                return ERR_PTR(ret);

        peer = kzalloc(sizeof(*peer), GFP_KERNEL);
        if (unlikely(!peer))
                return ERR_PTR(ret);
        peer->device = wg;

        wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
                                public_key, preshared_key, peer);
        if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
                goto err_1;
        if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
                                 MAX_QUEUED_PACKETS))
                goto err_2;
        if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
                                 MAX_QUEUED_PACKETS))
                goto err_3;

        peer->internal_id = atomic64_inc_return(&peer_counter);
        peer->serial_work_cpu = nr_cpumask_bits;
        wg_cookie_init(&peer->latest_cookie);
        wg_timers_init(peer);
        wg_cookie_checker_precompute_peer_keys(peer);
        spin_lock_init(&peer->keypairs.keypair_update_lock);
        INIT_WORK(&peer->transmit_handshake_work,
                  wg_packet_handshake_send_worker);
        rwlock_init(&peer->endpoint_lock);
        kref_init(&peer->refcount);
        skb_queue_head_init(&peer->staged_packet_queue);
        wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
        netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
                       NAPI_POLL_WEIGHT);
        napi_enable(&peer->napi);
        list_add_tail(&peer->peer_list, &wg->peer_list);
        INIT_LIST_HEAD(&peer->allowedips_list);
        wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
        ++wg->num_peers;
        pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
        return peer;

err_3:
        wg_packet_queue_free(&peer->tx_queue, false);
err_2:
        dst_cache_destroy(&peer->endpoint_cache);
err_1:
        kfree(peer);
        return ERR_PTR(ret);
}

struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
                         "Taking peer reference without holding the RCU read lock");
        if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
                return NULL;
        return peer;
}
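
/* Example (illustrative sketch only; not part of this file): the intended
 * pattern for taking a reference out of an RCU-protected lookup. The lookup
 * helper below is hypothetical; wg_peer_get_maybe_zero() and wg_peer_put()
 * are the real entry points defined in this file. The reference must be
 * taken inside the BH RCU read-side section, per the RCU_LOCKDEP_WARN above,
 * but may be used and dropped after it ends:
 *
 *	rcu_read_lock_bh();
 *	peer = wg_peer_get_maybe_zero(some_rcu_protected_lookup(...));
 *	rcu_read_unlock_bh();
 *	if (!peer)
 *		return;
 *	... use peer ...
 *	wg_peer_put(peer);
 */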

static void peer_make_dead(struct wg_peer *peer)
{
        /* Remove from configuration-time lookup structures. */
        list_del_init(&peer->peer_list);
        wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
                                     &peer->device->device_update_lock);
        wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

        /* Mark as dead, so that we don't allow jumping contexts after. */
        WRITE_ONCE(peer->is_dead, true);

        /* The caller must now synchronize_rcu() for this to take effect. */
}

static void peer_remove_after_dead(struct wg_peer *peer)
{
        WARN_ON(!peer->is_dead);

        /* No more keypairs can be created for this peer, since is_dead
         * protects add_new_keypair, so we can now destroy existing ones.
         */
        wg_noise_keypairs_clear(&peer->keypairs);

        /* Destroy all ongoing timers that were in-flight at the beginning of
         * this function.
         */
        wg_timers_stop(peer);

        /* The transition between packet encryption/decryption queues isn't
         * guarded by is_dead, but each reference's life is strictly bounded by
         * two generations: once for parallel crypto and once for serial
         * ingestion, so we can simply flush twice, and be sure that we no
         * longer have references inside these queues.
         */

        /* a) For encrypt/decrypt. */
        flush_workqueue(peer->device->packet_crypt_wq);
        /* b.1) For send (but not receive, since that's napi). */
        flush_workqueue(peer->device->packet_crypt_wq);
        /* b.2.1) For receive (but not send, since that's wq). */
        napi_disable(&peer->napi);
        /* b.2.2) It's now safe to remove the napi struct, which must be done
         * here from process context.
         */
        netif_napi_del(&peer->napi);

        /* Ensure any workstructs we own (like transmit_handshake_work or
         * clear_peer_work) are no longer in use.
         */
        flush_workqueue(peer->device->handshake_send_wq);

        /* After the above flushes, a peer might still be active in a few
         * different contexts: 1) from xmit(), before hitting is_dead and
         * returning, 2) from wg_packet_consume_data(), before hitting is_dead
         * and returning, 3) from wg_receive_handshake_packet() after a point
         * where it has processed an incoming handshake packet, but where
         * all calls to pass it off to timers fail because of is_dead. We won't
         * have new references in (1) eventually, because we're removed from
         * allowedips; we won't have new references in (2) eventually, because
         * wg_index_hashtable_lookup will always return NULL, since we removed
         * all existing keypairs and no more can be created; we won't have new
         * references in (3) eventually, because we're removed from the pubkey
         * hash table, which allows for a maximum of one handshake response,
         * via the still-uncleared index hashtable entry, but not more than
         * one, and in wg_cookie_message_consume, the lookup eventually gets a
         * peer with a refcount of zero, so no new reference is taken.
         */

        --peer->device->num_peers;
        wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
        if (unlikely(!peer))
                return;
        lockdep_assert_held(&peer->device->device_update_lock);

        peer_make_dead(peer);
        synchronize_rcu();
        peer_remove_after_dead(peer);
}

void wg_peer_remove_all(struct wg_device *wg)
{
        struct wg_peer *peer, *temp;
        LIST_HEAD(dead_peers);

        lockdep_assert_held(&wg->device_update_lock);

        /* Avoid having to traverse individually for each one. */
        wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

        list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
                peer_make_dead(peer);
                list_add_tail(&peer->peer_list, &dead_peers);
        }
        synchronize_rcu();
        list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
                peer_remove_after_dead(peer);
}
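
/* Example (illustrative sketch only; not part of this file): per the lockdep
 * assertions above, configuration paths are expected to hold the device's
 * update mutex across creation and removal. The error handling shown is a
 * sketch of a hypothetical caller, not an excerpt from the netlink code:
 *
 *	mutex_lock(&wg->device_update_lock);
 *	peer = wg_peer_create(wg, public_key, preshared_key);
 *	if (IS_ERR(peer)) {
 *		mutex_unlock(&wg->device_update_lock);
 *		return PTR_ERR(peer);
 *	}
 *	... configure allowed IPs, endpoint, etc. ...
 *	wg_peer_remove(peer);
 *	mutex_unlock(&wg->device_update_lock);
 */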

static void rcu_release(struct rcu_head *rcu)
{
        struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

        dst_cache_destroy(&peer->endpoint_cache);
        wg_packet_queue_free(&peer->rx_queue, false);
        wg_packet_queue_free(&peer->tx_queue, false);

        /* The final zeroing takes care of clearing any remaining handshake key
         * material and other potentially sensitive information.
         */
        kfree_sensitive(peer);
}

static void kref_release(struct kref *refcount)
{
        struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

        pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
                 peer->device->dev->name, peer->internal_id,
                 &peer->endpoint.addr);

        /* Remove ourselves from dynamic runtime lookup structures, now that
         * the last reference is gone.
         */
        wg_index_hashtable_remove(peer->device->index_hashtable,
                                  &peer->handshake.entry);

        /* Remove any lingering packets that didn't have a chance to be
         * transmitted.
         */
        wg_packet_purge_staged_packets(peer);

        /* Free the memory used. */
        call_rcu(&peer->rcu, rcu_release);
}

void wg_peer_put(struct wg_peer *peer)
{
        if (unlikely(!peer))
                return;
        kref_put(&peer->refcount, kref_release);
}
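
/* Example (illustrative sketch only; not part of this file): how a data-path
 * context is expected to honor the is_dead flag discussed in
 * peer_remove_after_dead() above. The surrounding context is hypothetical;
 * the real contract is the READ_ONCE() here pairing with the WRITE_ONCE() in
 * peer_make_dead(), so that a dead peer is dropped rather than handed off:
 *
 *	rcu_read_lock_bh();
 *	peer = wg_peer_get_maybe_zero(some_rcu_protected_lookup(...));
 *	if (peer && !READ_ONCE(peer->is_dead)) {
 *		... queue packet work against peer ...
 *	}
 *	wg_peer_put(peer);
 *	rcu_read_unlock_bh();
 */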