/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

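/* A packet's crypto state forms a small state machine: it is enqueued as
 * UNCRYPTED, and the crypto worker later marks it CRYPTED on success or
 * DEAD on failure. Consumers wait for it to leave UNCRYPTED before
 * dequeuing it (see wg_queue_enqueue_per_device_and_peer() below).
 */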
enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
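
/* PACKET_CB() overlays struct packet_cb onto the skb's 48-byte control
 * buffer, so the struct must never outgrow skb->cb. A hypothetical sketch
 * (not part of this header) of a compile-time guard one could place in
 * init code, assuming sizeof_field() is available in this kernel:
 *
 *	BUILD_BUG_ON(sizeof(struct packet_cb) >
 *		     sizeof_field(struct sk_buff, cb));
 */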

/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
{
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct iphdr)) <=
		    skb_tail_pointer(skb) &&
	    ip_hdr(skb)->version == 4)
		return htons(ETH_P_IP);
	if (skb_network_header(skb) >= skb->head &&
	    (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
		    skb_tail_pointer(skb) &&
	    ipv6_hdr(skb)->version == 6)
		return htons(ETH_P_IPV6);
	return 0;
}
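
/* For illustration only: a caller would typically use the result both to
 * validate the untrusted header and to stamp the protocol, roughly like
 * this hypothetical sketch:
 *
 *	skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
 *	if (unlikely(!skb->protocol))
 *		goto drop;
 */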

/* Scrub the skb of all connection and header metadata so it can be treated
 * as a fresh packet, e.g. before handing a freshly decrypted packet up the
 * stack.
 */
static inline void wg_reset_packet(struct sk_buff *skb)
{
	const int pfmemalloc = skb->pfmemalloc;

	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
		       offsetof(struct sk_buff, headers_start));
	skb->pfmemalloc = pfmemalloc;
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
	skb_reset_tc(skb);
#endif
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could
 * return the same CPU twice. A race-free version of this would be to
 * instead store an atomic sequence number, do an increment-and-return, and
 * then iterate through every possible CPU until we get to that index, in
 * the style of wg_cpumask_choose_online() above. However, that's a bit
 * slower, and it doesn't seem like this potential race actually introduces
 * any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}
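
/* The race-free variant described in the comment above would look roughly
 * like this hypothetical sketch (not part of the driver; it assumes an
 * atomic_t sequence counter in place of the plain int):
 *
 *	static inline int wg_cpumask_next_online_racefree(atomic_t *seq)
 *	{
 *		unsigned int index = (unsigned int)atomic_inc_return(seq) %
 *				     cpumask_weight(cpu_online_mask);
 *		int cpu = cpumask_first(cpu_online_mask);
 *
 *		while (index--)
 *			cpu = cpumask_next(cpu, cpu_online_mask);
 *		return cpu;
 *	}
 */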

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeuing it.
	 */
	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
		return -ENOSPC;
	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
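
/* For illustration only: the consumer of the peer queue peeks the ring and
 * only dequeues once a packet has left the UNCRYPTED state, pairing an
 * acquire read with the release store above, roughly like this
 * hypothetical sketch:
 *
 *	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *			PACKET_STATE_UNCRYPTED) {
 *		__ptr_ring_discard_one(&queue->ring);
 *		... transmit or free the skb, depending on state ...
 *	}
 */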

static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
					     struct sk_buff *skb,
					     enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
					       peer->internal_id),
		      peer->device->packet_crypt_wq, &queue->work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
						  enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */