xref: /linux/arch/um/drivers/vector_transports.c (revision 8bc7c5e525584903ea83332e18a2118ed3b1985e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

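/*
 * Transport-specific header handling for the UML vector network driver.
 *
 * Each build_*_transport_data() helper below parses the relevant command
 * line parameters, fills in vp->form_header / vp->verify_header and sets
 * the tx/rx header sizes for one encapsulation: GRE, L2TPv3, raw socket,
 * tap, the raw/tap hybrid and BESS.
 */
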
struct gre_minimal_header {
	uint16_t header;
	uint16_t arptype;
};

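/*
 * Run-time GRE transport state: negotiated rx/tx keys, the running
 * sequence counter, feature flags and the byte offsets of the optional
 * fields within the GRE header built in gre_form_header().
 */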
struct uml_gre_data {
	uint32_t rx_key;
	uint32_t tx_key;
	uint32_t sequence;

	bool ipv6;
	bool has_sequence;
	bool pin_sequence;
	bool checksum;
	bool key;
	struct gre_minimal_header expected_header;

	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;
};

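/*
 * Run-time L2TPv3 transport state: rx/tx session IDs and cookies, the
 * optional data counter, feature flags and the byte offsets of each
 * field within the header built in l2tpv3_form_header().
 */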
struct uml_l2tpv3_data {
	uint64_t rx_cookie;
	uint64_t tx_cookie;
	uint64_t rx_session;
	uint64_t tx_session;
	uint32_t counter;

	bool udp;
	bool ipv6;
	bool has_counter;
	bool pin_counter;
	bool cookie;
	bool cookie_is_64;

	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};

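/*
 * Write the outgoing L2TPv3 header in front of the frame: the flags word
 * (UDP encapsulation only), the tx session ID, the optional cookie and,
 * if enabled, the data counter (either pinned to zero or incremented per
 * packet).
 */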
static int l2tpv3_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *counter;

	if (td->udp)
		*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
	(*(uint32_t *) (header + td->session_offset)) = td->tx_session;

	if (td->cookie) {
		if (td->cookie_is_64)
			(*(uint64_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
		else
			(*(uint32_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
	}
	if (td->has_counter) {
		counter = (uint32_t *)(header + td->counter_offset);
		if (td->pin_counter) {
			*counter = 0;
		} else {
			td->counter++;
			*counter = cpu_to_be32(td->counter);
		}
	}
	return 0;
}

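/*
 * Write the outgoing GRE header: the pre-computed flags/protocol words,
 * followed by the optional key and the optional sequence number (pinned
 * to zero or incremented per packet).
 */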
static int gre_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_gre_data *td = vp->transport_data;
	uint32_t *sequence;

	*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
	if (td->key)
		(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
	if (td->has_sequence) {
		sequence = (uint32_t *)(header + td->sequence_offset);
		if (td->pin_sequence)
			*sequence = 0;
		else
			*sequence = cpu_to_be32(++td->sequence);
	}
	return 0;
}

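/*
 * Raw/tap transports prepend a virtio_net header so that checksum and
 * GSO metadata for the skb travels with the frame.
 */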
static int raw_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	virtio_net_hdr_from_skb(
		skb,
		vheader,
		virtio_legacy_is_little_endian(),
		false,
		0
	);

	return 0;
}

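/*
 * Validate an incoming L2TPv3 header: skip the outer IP header for the
 * raw IPv4 case, then check the cookie (if configured) and the session
 * ID against the expected rx values.
 */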
static int l2tpv3_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *session;
	uint64_t cookie;

	if ((!td->udp) && (!td->ipv6))
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	/* We do not strictly check that this is a "data" packet as the
	 * RFC requires, because the pure IP encapsulation does not carry
	 * the flags/version word needed for that check anyway.
	 */

	if (td->cookie) {
		if (td->cookie_is_64)
			cookie = *(uint64_t *)(header + td->cookie_offset);
		else
			cookie = *(uint32_t *)(header + td->cookie_offset);
		if (cookie != td->rx_cookie) {
			if (net_ratelimit())
				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
			return -1;
		}
	}
	session = (uint32_t *) (header + td->session_offset);
	if (*session != td->rx_session) {
		if (net_ratelimit())
			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
		return -1;
	}
	return 0;
}

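/*
 * Validate an incoming GRE header: skip the outer IP header for the raw
 * IPv4 case, then check the flags/protocol words and, if keying is
 * enabled, the rx key.
 */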
static int gre_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	uint32_t key;
	struct uml_gre_data *td = vp->transport_data;

	if (!td->ipv6)
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
		if (net_ratelimit())
			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
				*((uint32_t *) &td->expected_header),
				*((uint32_t *) header)
			);
		return -1;
	}

	if (td->key) {
		key = (*(uint32_t *)(header + td->key_offset));
		if (key != td->rx_key) {
			if (net_ratelimit())
				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
						key, td->rx_key);
			return -1;
		}
	}
	return 0;
}

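/*
 * Consume the incoming virtio_net header: warn (rate limited) if GSO
 * frames arrive while the receive buffers are not sized for them, tell
 * the caller when the frame is already marked as having a valid
 * checksum, and otherwise let virtio_net_hdr_to_skb() populate the skb
 * metadata.
 */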
static int raw_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
		(vp->req_size != 65536)) {
		if (net_ratelimit())
			netdev_err(
				vp->dev,
				GSO_ERROR
			);
	}
	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
		return 1;

	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
	return 0;
}

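/*
 * Small helpers for pulling numeric "name=value" options out of the
 * parsed command line arguments for this interface.
 */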
static bool get_uint_param(
	struct arglist *def, char *param, unsigned int *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtouint(arg, 0, result) == 0)
			return true;
	}
	return false;
}

static bool get_ulong_param(
	struct arglist *def, char *param, unsigned long *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
	}
	return false;
}

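/*
 * GRE: the transmit header is a 4-byte base (flags + protocol type),
 * optionally followed by a 4-byte key and a 4-byte sequence number, as
 * reflected in the offsets below.  On receive over a raw IPv4 socket the
 * outer IP header is read as well, hence the larger rx_header_size.
 */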
static int build_gre_transport_data(struct vector_private *vp)
{
	struct uml_gre_data *td;
	unsigned int temp_int;
	unsigned int temp_rx;
	unsigned int temp_tx;

	vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;
	td = vp->transport_data;
	td->sequence = 0;

	td->expected_header.arptype = GRE_IRB;
	td->expected_header.header = 0;

	vp->form_header = &gre_form_header;
	vp->verify_header = &gre_verify_header;
	vp->header_size = 4;
	td->key_offset = 4;
	td->sequence_offset = 4;
	td->checksum_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}
	td->key = false;
	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
			td->key = true;
			td->expected_header.header |= GRE_MODE_KEY;
			td->rx_key = cpu_to_be32(temp_rx);
			td->tx_key = cpu_to_be32(temp_tx);
			vp->header_size += 4;
			td->sequence_offset += 4;
		} else {
			return -EINVAL;
		}
	}

	td->has_sequence = false;
	td->pin_sequence = false;
	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
		if (temp_int > 0) {
			vp->header_size += 4;
			td->has_sequence = true;
			td->expected_header.header |= GRE_MODE_SEQUENCE;
			if (get_uint_param(
				vp->parsed, "pin_sequence", &temp_int)) {
				if (temp_int > 0)
					td->pin_sequence = true;
			}
		}
	}
	vp->rx_header_size = vp->header_size;
	if (!td->ipv6)
		vp->rx_header_size += sizeof(struct iphdr);
	return 0;
}

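/*
 * L2TPv3: the header starts with the session ID (preceded by a 4-byte
 * flags word when running over UDP), optionally followed by a 32 or 64
 * bit cookie and a 4-byte counter.  The offsets below are adjusted as
 * each optional field is enabled.
 */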
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
	struct uml_l2tpv3_data *td;
	unsigned int temp_int, temp_rxs, temp_txs;
	unsigned long temp_rx;
	unsigned long temp_tx;

	vp->transport_data = kmalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);

	if (vp->transport_data == NULL)
		return -ENOMEM;

	td = vp->transport_data;

	vp->form_header = &l2tpv3_form_header;
	vp->verify_header = &l2tpv3_verify_header;
	td->counter = 0;

	vp->header_size = 4;
	td->session_offset = 0;
	td->cookie_offset = 4;
	td->counter_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}

	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
			td->tx_session = cpu_to_be32(temp_txs);
			td->rx_session = cpu_to_be32(temp_rxs);
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	td->cookie_is_64 = false;
	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
		if (temp_int > 0)
			td->cookie_is_64 = true;
	}
	td->cookie = false;
	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
			td->cookie = true;
			if (td->cookie_is_64) {
				td->rx_cookie = cpu_to_be64(temp_rx);
				td->tx_cookie = cpu_to_be64(temp_tx);
				vp->header_size += 8;
				td->counter_offset += 8;
			} else {
				td->rx_cookie = cpu_to_be32(temp_rx);
				td->tx_cookie = cpu_to_be32(temp_tx);
				vp->header_size += 4;
				td->counter_offset += 4;
			}
		} else {
			return -EINVAL;
		}
	}

	td->has_counter = false;
	td->pin_counter = false;
	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
		if (temp_int > 0) {
			td->has_counter = true;
			vp->header_size += 4;
			if (get_uint_param(
				vp->parsed, "pin_counter", &temp_int)) {
				if (temp_int > 0)
					td->pin_counter = true;
			}
		}
	}

	td->udp = false;
	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
		if (temp_int > 0) {
			td->udp = true;
			vp->header_size += 4;
			td->counter_offset += 4;
			td->session_offset += 4;
			td->cookie_offset += 4;
		}
	}

	vp->rx_header_size = vp->header_size;
	if ((!td->ipv6) && (!td->udp))
		vp->rx_header_size += sizeof(struct iphdr);

	return 0;
}

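/*
 * Raw socket transport: if vnet headers can be enabled on both the rx
 * and tx sockets, use the virtio_net header helpers above and advertise
 * the corresponding offloads to the stack.
 */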
static int build_raw_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
			return -1;
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"raw: using vnet headers for tso and tx/rx checksum"
		);
	}
	return 0;
}

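/*
 * Hybrid transport: receive on a raw socket, transmit via tap.  Vnet
 * headers must be enabled on both descriptors for the offloads to be
 * usable.
 */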
static int build_hybrid_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap/raw hybrid: using vnet headers for tso and tx/rx checksum"
		);
	} else {
		return 0; /* do not try to enable tap too if raw failed */
	}
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
		return 0;
	return -1;
}

static int build_tap_transport_data(struct vector_private *vp)
{
	/* "Pure" tap uses the same fd for rx and tx */
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap: using vnet headers for tso and tx/rx checksum"
		);
		return 0;
	}
	return -1;
}

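/* BESS sockets carry the frame as is, so there is no header to build or verify. */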
static int build_bess_transport_data(struct vector_private *vp)
{
	vp->form_header = NULL;
	vp->verify_header = NULL;
	vp->header_size = 0;
	vp->rx_header_size = 0;
	return 0;
}

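/*
 * Dispatch to the transport-specific setup routine selected by the
 * "transport=" command line parameter.
 */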
int build_transport_data(struct vector_private *vp)
{
	char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return build_gre_transport_data(vp);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return build_l2tpv3_transport_data(vp);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return build_raw_transport_data(vp);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return build_tap_transport_data(vp);
	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
		return build_hybrid_transport_data(vp);
	if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
		return build_bess_transport_data(vp);
	return 0;
}