1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_NET_H
3 #define _LINUX_VIRTIO_NET_H
4
5 #include <linux/if_vlan.h>
6 #include <linux/ip.h>
7 #include <linux/ipv6.h>
8 #include <linux/udp.h>
9 #include <uapi/linux/tcp.h>
10 #include <uapi/linux/virtio_net.h>
11
virtio_net_hdr_match_proto(__be16 protocol,__u8 gso_type)12 static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
13 {
14 switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
15 case VIRTIO_NET_HDR_GSO_TCPV4:
16 return protocol == cpu_to_be16(ETH_P_IP);
17 case VIRTIO_NET_HDR_GSO_TCPV6:
18 return protocol == cpu_to_be16(ETH_P_IPV6);
19 case VIRTIO_NET_HDR_GSO_UDP:
20 case VIRTIO_NET_HDR_GSO_UDP_L4:
21 return protocol == cpu_to_be16(ETH_P_IP) ||
22 protocol == cpu_to_be16(ETH_P_IPV6);
23 default:
24 return false;
25 }
26 }
27
virtio_net_hdr_set_proto(struct sk_buff * skb,const struct virtio_net_hdr * hdr)28 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
29 const struct virtio_net_hdr *hdr)
30 {
31 if (skb->protocol)
32 return 0;
33
34 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
35 case VIRTIO_NET_HDR_GSO_TCPV4:
36 case VIRTIO_NET_HDR_GSO_UDP:
37 case VIRTIO_NET_HDR_GSO_UDP_L4:
38 skb->protocol = cpu_to_be16(ETH_P_IP);
39 break;
40 case VIRTIO_NET_HDR_GSO_TCPV6:
41 skb->protocol = cpu_to_be16(ETH_P_IPV6);
42 break;
43 default:
44 return -EINVAL;
45 }
46
47 return 0;
48 }
49
/*
 * __virtio_net_hdr_to_skb - populate @skb offload state from a virtio header
 * @skb: packet to update; payload must already be attached
 * @hdr: virtio_net header describing the packet's offloads
 * @little_endian: true if @hdr multi-byte fields are little-endian
 * @hdr_gso_type: GSO type to honour; callers may pass hdr->gso_type or a
 *                masked copy (the tunnel path strips tunnel bits first)
 *
 * Validates the header against the actual packet contents, then sets the
 * partial-checksum state, transport header offset and GSO parameters on
 * @skb.  Returns 0 on success, -EINVAL when the header is inconsistent
 * with the packet.
 */
static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
					  const struct virtio_net_hdr *hdr,
					  bool little_endian, u8 hdr_gso_type)
{
	/* Minimal network header implied by the GSO type; defaults to IPv4
	 * and is raised for IPv6 below.
	 */
	unsigned int nh_min_len = sizeof(struct iphdr);
	unsigned int gso_type = 0;
	unsigned int thlen = 0;
	unsigned int p_off = 0;
	unsigned int ip_proto;

	if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Map the virtio GSO type to the kernel SKB_GSO_* flag and
		 * record the expected transport protocol and header size.
		 */
		switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			nh_min_len = sizeof(struct ipv6hdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			ip_proto = IPPROTO_UDP;
			thlen = sizeof(struct udphdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP_L4:
			gso_type = SKB_GSO_UDP_L4;
			ip_proto = IPPROTO_UDP;
			thlen = sizeof(struct udphdr);
			break;
		default:
			return -EINVAL;
		}

		if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		/* A GSO packet with zero segment size makes no sense. */
		if (hdr->gso_size == 0)
			return -EINVAL;
	}

	skb_reset_mac_header(skb);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
		/* Both the checksum field and (for GSO) the full transport
		 * header must live in linear data.
		 */
		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));

		if (!pskb_may_pull(skb, needed))
			return -EINVAL;

		if (!skb_partial_csum_set(skb, start, off))
			return -EINVAL;
		/* csum_start doubles as the transport offset: it must leave
		 * room for at least the minimal network header.
		 */
		if (skb_transport_offset(skb) < nh_min_len)
			return -EINVAL;

		nh_min_len = skb_transport_offset(skb);
		p_off = nh_min_len + thlen;
		if (!pskb_may_pull(skb, p_off))
			return -EINVAL;
	} else {
		/* gso packets without NEEDS_CSUM do not set transport_offset.
		 * probe and drop if does not match one of the above types.
		 */
		if (gso_type && skb->network_header) {
			struct flow_keys_basic keys;

			if (!skb->protocol) {
				/* Try the device header parser first; fall
				 * back to deriving the protocol from the
				 * GSO type itself.
				 */
				__be16 protocol = dev_parse_header_protocol(skb);

				if (!protocol)
					virtio_net_hdr_set_proto(skb, hdr);
				else if (!virtio_net_hdr_match_proto(protocol,
								     hdr_gso_type))
					return -EINVAL;
				else
					skb->protocol = protocol;
			}
retry:
			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
							      NULL, 0, 0, 0,
							      0)) {
				/* UFO does not specify ipv4 or 6: try both */
				if (gso_type & SKB_GSO_UDP &&
				    skb->protocol == htons(ETH_P_IP)) {
					skb->protocol = htons(ETH_P_IPV6);
					goto retry;
				}
				return -EINVAL;
			}

			/* Dissected transport protocol must agree with the
			 * one implied by the GSO type.
			 */
			p_off = keys.control.thoff + thlen;
			if (!pskb_may_pull(skb, p_off) ||
			    keys.basic.ip_proto != ip_proto)
				return -EINVAL;

			skb_set_transport_header(skb, keys.control.thoff);
		} else if (gso_type) {
			/* No network header to dissect: just ensure the
			 * minimal headers are present in linear data.
			 */
			p_off = nh_min_len + thlen;
			if (!pskb_may_pull(skb, p_off))
				return -EINVAL;
		}
	}

	if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
		unsigned int nh_off = p_off;
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		switch (gso_type & ~SKB_GSO_TCP_ECN) {
		case SKB_GSO_UDP:
			/* UFO may not include transport header in gso_size. */
			nh_off -= thlen;
			break;
		case SKB_GSO_UDP_L4:
			/* USO requires a valid UDP checksum request ... */
			if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
				return -EINVAL;
			if (skb->csum_offset != offsetof(struct udphdr, check))
				return -EINVAL;
			/* ... a bounded number of resulting segments ... */
			if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
				return -EINVAL;
			/* ... and no stray SKB_GSO_TCP_ECN bit. */
			if (gso_type != SKB_GSO_UDP_L4)
				return -EINVAL;
			break;
		case SKB_GSO_TCPV4:
		case SKB_GSO_TCPV6:
			if (skb->ip_summed == CHECKSUM_PARTIAL &&
			    skb->csum_offset != offsetof(struct tcphdr, check))
				return -EINVAL;
			break;
		}

		/* Kernel has a special handling for GSO_BY_FRAGS. */
		if (gso_size == GSO_BY_FRAGS)
			return -EINVAL;

		/* Too small packets are not really GSO ones. */
		if (skb->len - nh_off > gso_size) {
			shinfo->gso_size = gso_size;
			shinfo->gso_type = gso_type;

			/* Header must be checked, and gso_segs computed. */
			shinfo->gso_type |= SKB_GSO_DODGY;
			shinfo->gso_segs = 0;
		}
	}

	return 0;
}
202
/*
 * Plain (non-tunnel) entry point: honour the GSO type exactly as carried
 * in @hdr.  See __virtio_net_hdr_to_skb() for the full semantics.
 */
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
					const struct virtio_net_hdr *hdr,
					bool little_endian)
{
	const u8 gso = hdr->gso_type;

	return __virtio_net_hdr_to_skb(skb, hdr, little_endian, gso);
}
209
210 /* This function must be called after virtio_net_hdr_from_skb(). */
static inline void __virtio_net_set_hdrlen(const struct sk_buff *skb,
					   struct virtio_net_hdr *hdr,
					   bool little_endian)
{
	/* Header length = L2+L3 headers up to the transport header, plus
	 * the transport header itself (fixed-size UDP for USO, actual TCP
	 * header length otherwise).
	 */
	u16 len = skb_transport_offset(skb);

	len += (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP_L4) ?
	       sizeof(struct udphdr) : tcp_hdrlen(skb);

	hdr->hdr_len = __cpu_to_virtio16(little_endian, len);
}
226
227 /* This function must be called after virtio_net_hdr_from_skb(). */
__virtio_net_set_tnl_hdrlen(const struct sk_buff * skb,struct virtio_net_hdr * hdr)228 static inline void __virtio_net_set_tnl_hdrlen(const struct sk_buff *skb,
229 struct virtio_net_hdr *hdr)
230 {
231 u16 hdr_len;
232
233 hdr_len = skb_inner_transport_offset(skb);
234
235 if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP_L4)
236 hdr_len += sizeof(struct udphdr);
237 else
238 hdr_len += inner_tcp_hdrlen(skb);
239
240 hdr->hdr_len = __cpu_to_virtio16(true, hdr_len);
241 }
242
/*
 * Build a virtio_net header describing @skb's offload state.
 * @vlan_hlen is added to csum_start to account for a VLAN tag the caller
 * will insert in front of the checksummed data.  @has_data_valid allows
 * reporting CHECKSUM_UNNECESSARY packets as DATA_VALID.
 * Returns 0 on success, -EINVAL for GSO types virtio cannot express.
 */
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
					  struct virtio_net_hdr *hdr,
					  bool little_endian,
					  bool has_data_valid,
					  int vlan_hlen)
{
	memset(hdr, 0, sizeof(*hdr)); /* no info leak */

	if (!skb_is_gso(skb)) {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
	} else {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		hdr->hdr_len = __cpu_to_virtio16(little_endian,
						 skb_headlen(skb));
		hdr->gso_size = __cpu_to_virtio16(little_endian,
						  sinfo->gso_size);

		if (sinfo->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP_L4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP_L4;
		else
			return -EINVAL;

		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = __cpu_to_virtio16(little_endian,
			skb_checksum_start_offset(skb) + vlan_hlen);
		hdr->csum_offset = __cpu_to_virtio16(little_endian,
			skb->csum_offset);
	} else if (has_data_valid &&
		   skb->ip_summed == CHECKSUM_UNNECESSARY) {
		hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */

	return 0;
}
285
/* Minimal L3 (network) header length for the given address family. */
static inline unsigned int virtio_l3min(bool is_ipv6)
{
	if (is_ipv6)
		return sizeof(struct ipv6hdr);
	return sizeof(struct iphdr);
}
290
/*
 * virtio_net_hdr_tnl_to_skb - parse a tunnel-aware virtio_net header
 * @skb: packet to update
 * @vhdr: extended header carrying hash + UDP-tunnel offsets
 * @tnl_hdr_negotiated: UDP-tunnel GSO feature was negotiated
 * @tnl_csum_negotiated: outer-UDP-checksum feature was negotiated
 * @little_endian: endianness for the non-tunnel fallback path only
 *
 * Falls back to virtio_net_hdr_to_skb() when no tunnel bits are set.
 * Otherwise validates the tunnel request against negotiated features and
 * the layout offsets, and primes @skb's inner/outer header offsets and
 * tunnel GSO flags.  Returns 0 on success, -EINVAL on any inconsistency.
 */
static inline int
virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
			  const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
			  bool tnl_hdr_negotiated,
			  bool tnl_csum_negotiated,
			  bool little_endian)
{
	const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
	unsigned int inner_nh, outer_th, inner_th;
	unsigned int inner_l3min, outer_l3min;
	u8 gso_inner_type, gso_tunnel_type;
	bool outer_isv6, inner_isv6;
	int ret;

	/* No tunnel bits: behave exactly like the plain parser. */
	gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
	if (!gso_tunnel_type)
		return virtio_net_hdr_to_skb(skb, hdr, little_endian);

	/* Tunnel not supported/negotiated, but the hdr asks for it. */
	if (!tnl_hdr_negotiated)
		return -EINVAL;

	/* Either ipv4 or ipv6. */
	if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
		return -EINVAL;

	/* The UDP tunnel must carry a GSO packet, but no UFO. */
	gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
					   VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
	if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
		return -EINVAL;

	/* Rely on csum being present. */
	if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
		return -EINVAL;

	/* Validate offsets: outer transport, inner network and inner
	 * transport headers must each leave room for the headers that
	 * precede them.
	 */
	outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
	inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
	inner_l3min = virtio_l3min(inner_isv6);
	outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);

	/* csum_start points at the inner transport header. */
	inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
	inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
	outer_th = le16_to_cpu(vhdr->outer_th_offset);
	if (outer_th < outer_l3min ||
	    inner_nh < outer_th + sizeof(struct udphdr) ||
	    inner_th < inner_nh + inner_l3min)
		return -EINVAL;

	/* Let the basic parsing deal with plain GSO features. */
	ret = __virtio_net_hdr_to_skb(skb, hdr, true,
				      hdr->gso_type & ~gso_tunnel_type);
	if (ret)
		return ret;

	/* In case of USO, the inner protocol is still unknown and
	 * `inner_isv6` is just a guess, additional parsing is needed.
	 * The previous validation ensures that accessing an ipv4 inner
	 * network header is safe.
	 */
	if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
		struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);

		inner_isv6 = iphdr->version == 6;
		inner_l3min = virtio_l3min(inner_isv6);
		if (inner_th < inner_nh + inner_l3min)
			return -EINVAL;
	}

	skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
						 htons(ETH_P_IP));
	if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
		/* Outer UDP checksum requested but not negotiated. */
		if (!tnl_csum_negotiated)
			return -EINVAL;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	/* Header offsets are stored relative to skb->head. */
	skb->inner_transport_header = inner_th + skb_headroom(skb);
	skb->inner_network_header = inner_nh + skb_headroom(skb);
	skb->inner_mac_header = inner_nh + skb_headroom(skb);
	skb->transport_header = outer_th + skb_headroom(skb);
	skb->encapsulation = 1;
	return 0;
}
379
380 /* Checksum-related fields validation for the driver */
static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
						 struct virtio_net_hdr *hdr,
						 bool tnl_csum_negotiated)
{
	const __u8 flags = hdr->flags;

	if (hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL) {
		/* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
		 * over UDP tunnel requires the latter
		 */
		return (flags & VIRTIO_NET_HDR_F_DATA_VALID) ? -EINVAL : 0;
	}

	if (!(flags & VIRTIO_NET_HDR_F_DATA_VALID))
		return 0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
		/* tunnel csum packets are invalid when the related
		 * feature has not been negotiated
		 */
		if (!tnl_csum_negotiated)
			return -EINVAL;

		/* Inner checksum validated as well. */
		skb->csum_level = 1;
	}

	return 0;
}
409
410 /*
411 * vlan_hlen always refers to the outermost MAC header. That also
412 * means it refers to the only MAC header, if the packet does not carry
413 * any encapsulation.
414 */
/*
 * virtio_net_hdr_tnl_from_skb - build a tunnel-aware virtio_net header
 * @skb: packet whose offload state is exported
 * @vhdr: extended header to fill in
 * @tnl_hdr_negotiated: UDP-tunnel GSO feature was negotiated
 * @little_endian: endianness for the non-tunnel fallback path only
 * @vlan_hlen: VLAN tag length to fold into csum_start
 * @has_data_valid: allow reporting DATA_VALID on the non-tunnel path
 * @feature_hdrlen: device consumes hdr_len, so compute it precisely
 *
 * Falls back to virtio_net_hdr_from_skb() for non-tunnelled packets.
 * Returns 0 on success, -EINVAL when the skb requires features that were
 * not negotiated or cannot be expressed.
 */
static inline int
virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
			    struct virtio_net_hdr_v1_hash_tunnel *vhdr,
			    bool tnl_hdr_negotiated,
			    bool little_endian,
			    int vlan_hlen,
			    bool has_data_valid,
			    bool feature_hdrlen)
{
	struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
	unsigned int inner_nh, outer_th;
	int tnl_gso_type;
	int ret;

	tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
						    SKB_GSO_UDP_TUNNEL_CSUM);
	if (!tnl_gso_type) {
		/* Plain packet: reuse the basic builder. */
		ret = virtio_net_hdr_from_skb(skb, hdr, little_endian,
					      has_data_valid, vlan_hlen);
		if (ret)
			return ret;

		/* hdr_len is only set for GSO packets; refine it when the
		 * device actually consumes it.
		 */
		if (feature_hdrlen && hdr->hdr_len)
			__virtio_net_set_hdrlen(skb, hdr, little_endian);

		return ret;
	}

	/* Tunnel support not negotiated but skb ask for it. */
	if (!tnl_hdr_negotiated)
		return -EINVAL;

	/* Clear the hash portion explicitly; virtio_net_hdr_from_skb()
	 * only zeroes the basic header.
	 */
	vhdr->hash_hdr.hash_value_lo = 0;
	vhdr->hash_hdr.hash_value_hi = 0;
	vhdr->hash_hdr.hash_report = 0;
	vhdr->hash_hdr.padding = 0;

	/* Let the basic parsing deal with plain GSO features.
	 * NOTE(review): this temporarily clears the tunnel bits in the
	 * shared info even though @skb is const, then restores them —
	 * callers must own the skb here.
	 */
	skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
	ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
	skb_shinfo(skb)->gso_type |= tnl_gso_type;
	if (ret)
		return ret;

	if (feature_hdrlen && hdr->hdr_len)
		__virtio_net_set_tnl_hdrlen(skb, hdr);

	/* Record the outer address family in the GSO type. */
	if (skb->protocol == htons(ETH_P_IPV6))
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
	else
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;

	/* Offsets are exported relative to the start of the packet, not
	 * skb->head.
	 */
	inner_nh = skb->inner_network_header - skb_headroom(skb);
	outer_th = skb->transport_header - skb_headroom(skb);
	vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
	vhdr->outer_th_offset = cpu_to_le16(outer_th);
	return 0;
}
476
477 #endif /* _LINUX_VIRTIO_NET_H */
478