/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_NET_H
#define _LINUX_VIRTIO_NET_H

#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/virtio_net.h>

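/**
 * virtio_net_hdr_match_proto - check a protocol against a virtio GSO type
 * @protocol: EtherType of the packet, in network byte order
 * @gso_type: gso_type field of a struct virtio_net_hdr
 *
 * TCPv4 and TCPv6 GSO types must match the corresponding EtherType exactly,
 * while UDP (UFO) and UDP_L4 (USO) are accepted over either IPv4 or IPv6.
 *
 * Return: true if @protocol is consistent with @gso_type.
 */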
static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
{
	switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
	case VIRTIO_NET_HDR_GSO_TCPV4:
		return protocol == cpu_to_be16(ETH_P_IP);
	case VIRTIO_NET_HDR_GSO_TCPV6:
		return protocol == cpu_to_be16(ETH_P_IPV6);
	case VIRTIO_NET_HDR_GSO_UDP:
	case VIRTIO_NET_HDR_GSO_UDP_L4:
		return protocol == cpu_to_be16(ETH_P_IP) ||
		       protocol == cpu_to_be16(ETH_P_IPV6);
	default:
		return false;
	}
}

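/**
 * virtio_net_hdr_set_proto - derive skb->protocol from a virtio GSO type
 * @skb: buffer being set up from a virtio_net header
 * @hdr: virtio_net header received from the device or guest
 *
 * Does nothing if @skb already carries a protocol. Otherwise TCPv4, UDP and
 * UDP_L4 GSO types map to ETH_P_IP and TCPv6 maps to ETH_P_IPV6; for UFO the
 * guess may later be flipped to IPv6 by the flow-dissector retry in
 * __virtio_net_hdr_to_skb().
 *
 * Return: 0 on success, -EINVAL for an unknown GSO type.
 */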
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
					   const struct virtio_net_hdr *hdr)
{
	if (skb->protocol)
		return 0;

	switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
	case VIRTIO_NET_HDR_GSO_TCPV4:
	case VIRTIO_NET_HDR_GSO_UDP:
	case VIRTIO_NET_HDR_GSO_UDP_L4:
		skb->protocol = cpu_to_be16(ETH_P_IP);
		break;
	case VIRTIO_NET_HDR_GSO_TCPV6:
		skb->protocol = cpu_to_be16(ETH_P_IPV6);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

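/**
 * __virtio_net_hdr_to_skb - apply a virtio_net header to an skb
 * @skb: buffer to set up
 * @hdr: virtio_net header received from the device or guest
 * @little_endian: true if the header's multi-byte fields are little-endian
 * @hdr_gso_type: GSO type to honour; callers that handle UDP tunnel GSO pass
 *	@hdr->gso_type with the tunnel bits masked off
 *
 * Validates the checksum and GSO metadata in @hdr and transfers it to @skb:
 * partial-checksum offsets, the transport header position and, for GSO
 * packets, gso_size/gso_type (marked SKB_GSO_DODGY so the stack re-checks
 * the headers before segmenting).
 *
 * Return: 0 on success, -EINVAL if the header is malformed or inconsistent
 * with the packet contents.
 */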
static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
					  const struct virtio_net_hdr *hdr,
					  bool little_endian, u8 hdr_gso_type)
{
	unsigned int nh_min_len = sizeof(struct iphdr);
	unsigned int gso_type = 0;
	unsigned int thlen = 0;
	unsigned int p_off = 0;
	unsigned int ip_proto;

	if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			nh_min_len = sizeof(struct ipv6hdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			ip_proto = IPPROTO_UDP;
			thlen = sizeof(struct udphdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP_L4:
			gso_type = SKB_GSO_UDP_L4;
			ip_proto = IPPROTO_UDP;
			thlen = sizeof(struct udphdr);
			break;
		default:
			return -EINVAL;
		}

		if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (hdr->gso_size == 0)
			return -EINVAL;
	}

	skb_reset_mac_header(skb);

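	/*
	 * With NEEDS_CSUM the sender supplies the checksum start and offset,
	 * which also locate the transport header; they only need to be
	 * validated.  Without it, the transport offset of a GSO packet has
	 * to be recovered by probing the packet headers below.
	 */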
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));

		if (!pskb_may_pull(skb, needed))
			return -EINVAL;

		if (!skb_partial_csum_set(skb, start, off))
			return -EINVAL;
		if (skb_transport_offset(skb) < nh_min_len)
			return -EINVAL;

		nh_min_len = skb_transport_offset(skb);
		p_off = nh_min_len + thlen;
		if (!pskb_may_pull(skb, p_off))
			return -EINVAL;
	} else {
		/* GSO packets without NEEDS_CSUM do not set the transport
		 * offset.  Probe the headers and drop the packet if they do
		 * not match one of the types above.
		 */
		if (gso_type && skb->network_header) {
			struct flow_keys_basic keys;

			if (!skb->protocol) {
				__be16 protocol = dev_parse_header_protocol(skb);

				if (!protocol)
					virtio_net_hdr_set_proto(skb, hdr);
				else if (!virtio_net_hdr_match_proto(protocol,
								 hdr_gso_type))
					return -EINVAL;
				else
					skb->protocol = protocol;
			}
retry:
			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
							      NULL, 0, 0, 0,
							      0)) {
				/* UFO does not specify ipv4 or 6: try both */
				if (gso_type & SKB_GSO_UDP &&
				    skb->protocol == htons(ETH_P_IP)) {
					skb->protocol = htons(ETH_P_IPV6);
					goto retry;
				}
				return -EINVAL;
			}

			p_off = keys.control.thoff + thlen;
			if (!pskb_may_pull(skb, p_off) ||
			    keys.basic.ip_proto != ip_proto)
				return -EINVAL;

			skb_set_transport_header(skb, keys.control.thoff);
		} else if (gso_type) {
			p_off = nh_min_len + thlen;
			if (!pskb_may_pull(skb, p_off))
				return -EINVAL;
		}
	}

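	/*
	 * Per-type GSO sanity checks: USO must use a UDP partial checksum
	 * and stay within UDP_MAX_SEGMENTS, TSO with a partial checksum must
	 * point at the TCP checksum field, and GSO_BY_FRAGS is never valid
	 * here.  Packets that do not exceed gso_size stay non-GSO; the rest
	 * get the GSO metadata plus SKB_GSO_DODGY so the stack re-validates
	 * the headers and recomputes gso_segs.
	 */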
	if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
		unsigned int nh_off = p_off;
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		switch (gso_type & ~SKB_GSO_TCP_ECN) {
		case SKB_GSO_UDP:
			/* UFO may not include transport header in gso_size. */
			nh_off -= thlen;
			break;
		case SKB_GSO_UDP_L4:
			if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
				return -EINVAL;
			if (skb->csum_offset != offsetof(struct udphdr, check))
				return -EINVAL;
			if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
				return -EINVAL;
			if (gso_type != SKB_GSO_UDP_L4)
				return -EINVAL;
			break;
		case SKB_GSO_TCPV4:
		case SKB_GSO_TCPV6:
			if (skb->ip_summed == CHECKSUM_PARTIAL &&
			    skb->csum_offset != offsetof(struct tcphdr, check))
				return -EINVAL;
			break;
		}

		/* The kernel has special handling for GSO_BY_FRAGS. */
		if (gso_size == GSO_BY_FRAGS)
			return -EINVAL;

		/* Packets that are too small are not really GSO packets. */
		if (skb->len - nh_off > gso_size) {
			shinfo->gso_size = gso_size;
			shinfo->gso_type = gso_type;

			/* Header must be checked, and gso_segs computed. */
			shinfo->gso_type |= SKB_GSO_DODGY;
			shinfo->gso_segs = 0;
		}
	}

	return 0;
}

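/**
 * virtio_net_hdr_to_skb - apply a virtio_net header to an skb
 * @skb: buffer to set up
 * @hdr: virtio_net header received from the device or guest
 * @little_endian: true if the header's multi-byte fields are little-endian
 *
 * Wrapper around __virtio_net_hdr_to_skb() that uses the GSO type exactly as
 * found in @hdr, i.e. with no tunnel bits masked off.
 *
 * An illustrative sketch of how a caller might use it ("is_le" stands for
 * whatever endianness test the caller relies on and "vnet_hdr" for a header
 * copied in from the ring or from user space; not taken from a specific
 * driver):
 *
 *	if (virtio_net_hdr_to_skb(skb, &vnet_hdr, is_le)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *
 * Return: 0 on success, -EINVAL if the header is malformed.
 */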
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
					const struct virtio_net_hdr *hdr,
					bool little_endian)
{
	return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
}

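/**
 * virtio_net_hdr_from_skb - build a virtio_net header from an skb
 * @skb: packet being handed to the device, guest or user space
 * @hdr: header to fill in (zeroed first so no stack data can leak)
 * @little_endian: true if multi-byte header fields must be little-endian
 * @has_data_valid: true if VIRTIO_NET_HDR_F_DATA_VALID may be reported for
 *	CHECKSUM_UNNECESSARY packets
 * @vlan_hlen: VLAN header length to account for in csum_start, 0 if none
 *
 * Translates the GSO and checksum state of @skb into virtio_net header
 * fields. Only TCPv4, TCPv6 and UDP_L4 GSO can be expressed; other GSO types
 * (including UDP tunnel offloads) are rejected.
 *
 * A minimal caller sketch (illustrative only; "is_le" is whatever endianness
 * test applies in the caller):
 *
 *	struct virtio_net_hdr vnet_hdr;
 *
 *	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, is_le, true, 0))
 *		return -EINVAL;
 *
 * Return: 0 on success, -EINVAL for a GSO type that cannot be represented.
 */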
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
					  struct virtio_net_hdr *hdr,
					  bool little_endian,
					  bool has_data_valid,
					  int vlan_hlen)
{
	memset(hdr, 0, sizeof(*hdr));   /* no info leak */

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		hdr->hdr_len = __cpu_to_virtio16(little_endian,
						 skb_headlen(skb));
		hdr->gso_size = __cpu_to_virtio16(little_endian,
						  sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP_L4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP_L4;
		else
			return -EINVAL;
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = __cpu_to_virtio16(little_endian,
			skb_checksum_start_offset(skb) + vlan_hlen);
		hdr->csum_offset = __cpu_to_virtio16(little_endian,
				skb->csum_offset);
	} else if (has_data_valid &&
		   skb->ip_summed == CHECKSUM_UNNECESSARY) {
		hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */

	return 0;
}

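/* Minimum L3 header length for the given IP version (IPv4 or IPv6). */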
static inline unsigned int virtio_l3min(bool is_ipv6)
{
	return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
}

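/**
 * virtio_net_hdr_tnl_to_skb - apply a tunnel-aware virtio_net header to an skb
 * @skb: buffer to set up
 * @vhdr: extended virtio_net header carrying the UDP tunnel offsets
 * @tnl_hdr_negotiated: true if the UDP tunnel GSO feature was negotiated
 * @tnl_csum_negotiated: true if tunnel checksum offload was negotiated
 * @little_endian: true if the header's multi-byte fields are little-endian
 *
 * Falls back to virtio_net_hdr_to_skb() when no tunnel GSO type is set.
 * Otherwise it validates the tunnel feature bits and the outer/inner header
 * offsets, lets __virtio_net_hdr_to_skb() handle the plain GSO part, and
 * then records the inner/outer header offsets and encapsulation state.
 *
 * Return: 0 on success, -EINVAL on a malformed or un-negotiated header.
 */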
static inline int
virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
			  const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
			  bool tnl_hdr_negotiated,
			  bool tnl_csum_negotiated,
			  bool little_endian)
{
	const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
	unsigned int inner_nh, outer_th, inner_th;
	unsigned int inner_l3min, outer_l3min;
	u8 gso_inner_type, gso_tunnel_type;
	bool outer_isv6, inner_isv6;
	int ret;

	gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
	if (!gso_tunnel_type)
		return virtio_net_hdr_to_skb(skb, hdr, little_endian);

	/* Tunnel not supported/negotiated, but the hdr asks for it. */
	if (!tnl_hdr_negotiated)
		return -EINVAL;

	/* The tunnel must be either IPv4 or IPv6, not both. */
	if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
		return -EINVAL;

	/* The UDP tunnel must carry a GSO packet, but not UFO. */
	gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
					   VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
	if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
		return -EINVAL;

	/* Rely on csum being present. */
	if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
		return -EINVAL;

	/* Validate offsets. */
	outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
	inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
	inner_l3min = virtio_l3min(inner_isv6);
	outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);

	inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
	inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
	outer_th = le16_to_cpu(vhdr->outer_th_offset);
	if (outer_th < outer_l3min ||
	    inner_nh < outer_th + sizeof(struct udphdr) ||
	    inner_th < inner_nh + inner_l3min)
		return -EINVAL;

	/* Let the basic parsing deal with plain GSO features. */
	ret = __virtio_net_hdr_to_skb(skb, hdr, true,
				      hdr->gso_type & ~gso_tunnel_type);
	if (ret)
		return ret;

	/* In the USO case the inner protocol is still unknown and
	 * `inner_isv6` is just a guess; additional parsing is needed.
	 * The previous validation ensures that accessing an ipv4 inner
	 * network header is safe.
	 */
	if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
		struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);

		inner_isv6 = iphdr->version == 6;
		inner_l3min = virtio_l3min(inner_isv6);
		if (inner_th < inner_nh + inner_l3min)
			return -EINVAL;
	}

	skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
						 htons(ETH_P_IP));
	if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
		if (!tnl_csum_negotiated)
			return -EINVAL;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	skb->inner_transport_header = inner_th + skb_headroom(skb);
	skb->inner_network_header = inner_nh + skb_headroom(skb);
	skb->inner_mac_header = inner_nh + skb_headroom(skb);
	skb->transport_header = outer_th + skb_headroom(skb);
	skb->encapsulation = 1;
	return 0;
}

/* Checksum-related fields validation for the driver */
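/*
 * Without a tunnel GSO type, DATA_VALID marks the packet as already checksum
 * verified (CHECKSUM_UNNECESSARY); UDP_TUNNEL_CSUM on top of that raises
 * csum_level and is only acceptable when the tunnel checksum feature has
 * been negotiated.  With a tunnel GSO type, DATA_VALID must not be set,
 * since tunnelled GSO relies on NEEDS_CSUM instead.
 */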
static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
						 struct virtio_net_hdr *hdr,
						 bool tnl_csum_negotiated)
{
	if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) {
		if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID))
			return 0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM))
			return 0;

		/* tunnel csum packets are invalid when the related
		 * feature has not been negotiated
		 */
		if (!tnl_csum_negotiated)
			return -EINVAL;
		skb->csum_level = 1;
		return 0;
	}

	/* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
	 * over UDP tunnel requires the latter
	 */
	if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
		return -EINVAL;
	return 0;
}

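/**
 * virtio_net_hdr_tnl_from_skb - build a tunnel-aware virtio_net header
 * @skb: packet being handed to the device or guest
 * @vhdr: extended header to fill in, including the tunnel offsets
 * @tnl_hdr_negotiated: true if the UDP tunnel GSO feature was negotiated
 * @little_endian: true if multi-byte header fields must be little-endian
 * @vlan_hlen: VLAN header length to account for in csum_start (see below)
 *
 * Packets without UDP tunnel GSO types are delegated to
 * virtio_net_hdr_from_skb(). For tunnelled GSO, the tunnel bits are
 * temporarily cleared so the plain GSO fields can be filled in, then the
 * tunnel GSO type, the optional tunnel checksum flag and the inner network /
 * outer transport header offsets are recorded in @vhdr.
 *
 * Return: 0 on success, a negative errno from virtio_net_hdr_from_skb()
 * otherwise.
 */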
/*
 * vlan_hlen always refers to the outermost MAC header. That also
 * means it refers to the only MAC header, if the packet does not carry
 * any encapsulation.
 */
static inline int
virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
			    struct virtio_net_hdr_v1_hash_tunnel *vhdr,
			    bool tnl_hdr_negotiated,
			    bool little_endian,
			    int vlan_hlen)
{
	struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
	unsigned int inner_nh, outer_th;
	int tnl_gso_type;
	int ret;

	tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
						    SKB_GSO_UDP_TUNNEL_CSUM);
	if (!tnl_gso_type)
		return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
					       vlan_hlen);

	/* Tunnel support not negotiated, but the skb asks for it. */
	if (!tnl_hdr_negotiated)
		return -EINVAL;

	/* Let the basic parsing deal with plain GSO features. */
	skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
	ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
	skb_shinfo(skb)->gso_type |= tnl_gso_type;
	if (ret)
		return ret;

	if (skb->protocol == htons(ETH_P_IPV6))
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
	else
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;

	inner_nh = skb->inner_network_header - skb_headroom(skb);
	outer_th = skb->transport_header - skb_headroom(skb);
	vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
	vhdr->outer_th_offset = cpu_to_le16(outer_th);
	return 0;
}


#endif /* _LINUX_VIRTIO_NET_H */