// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
#include <net/udp_tunnel.h>

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)

/*
 * Dummy GRO tunnel callback; exists mainly to avoid dangling/NULL
 * values for the udp tunnel static call.
 */
static struct sk_buff *dummy_gro_rcv(struct sock *sk,
                                     struct list_head *head,
                                     struct sk_buff *skb)
{
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}

typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
                                                struct list_head *head,
                                                struct sk_buff *skb);

struct udp_tunnel_type_entry {
        udp_tunnel_gro_rcv_t gro_receive;
        refcount_t count;
};

#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
                              IS_ENABLED(CONFIG_VXLAN) * 2 + \
                              IS_ENABLED(CONFIG_NET_FOU) * 2 + \
                              IS_ENABLED(CONFIG_XFRM) * 2)

DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
static struct mutex udp_tunnel_gro_type_lock;
static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
static unsigned int udp_tunnel_gro_type_nr;
static DEFINE_SPINLOCK(udp_tunnel_gro_lock);

void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
{
        bool is_ipv6 = sk->sk_family == AF_INET6;
        struct udp_sock *tup, *up = udp_sk(sk);
        struct udp_tunnel_gro *udp_tunnel_gro;

        spin_lock(&udp_tunnel_gro_lock);
        udp_tunnel_gro = &net->ipv4.udp_tunnel_gro[is_ipv6];
        if (add)
                hlist_add_head(&up->tunnel_list, &udp_tunnel_gro->list);
        else if (up->tunnel_list.pprev)
                hlist_del_init(&up->tunnel_list);

        if (udp_tunnel_gro->list.first &&
            !udp_tunnel_gro->list.first->next) {
                tup = hlist_entry(udp_tunnel_gro->list.first, struct udp_sock,
                                  tunnel_list);

                rcu_assign_pointer(udp_tunnel_gro->sk, (struct sock *)tup);
        } else {
                RCU_INIT_POINTER(udp_tunnel_gro->sk, NULL);
        }

        spin_unlock(&udp_tunnel_gro_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);
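
/*
 * Illustrative sketch, not part of this file: a tunnel driver owning an
 * encap socket would toggle the single-socket lookup shortcut above from
 * its socket add/release paths roughly like this. The my_tunnel_* names
 * are hypothetical; only udp_tunnel_update_gro_lookup() is real.
 *
 *      static void my_tunnel_sock_added(struct net *net, struct sock *sk)
 *      {
 *              udp_tunnel_update_gro_lookup(net, sk, true);
 *      }
 *
 *      static void my_tunnel_sock_released(struct net *net, struct sock *sk)
 *      {
 *              udp_tunnel_update_gro_lookup(net, sk, false);
 *      }
 *
 * With exactly one tunnel socket per family, udp4_gro_lookup_skb() below
 * can skip the full __udp4_lib_lookup() on the fast path.
 */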

void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
{
        struct udp_tunnel_type_entry *cur = NULL;
        struct udp_sock *up = udp_sk(sk);
        int i, old_gro_type_nr;

        if (!UDP_MAX_TUNNEL_TYPES || !up->gro_receive)
                return;

        mutex_lock(&udp_tunnel_gro_type_lock);

        /* Check if the static call is permanently disabled. */
        if (udp_tunnel_gro_type_nr > UDP_MAX_TUNNEL_TYPES)
                goto out;

        for (i = 0; i < udp_tunnel_gro_type_nr; i++)
                if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive)
                        cur = &udp_tunnel_gro_types[i];

        old_gro_type_nr = udp_tunnel_gro_type_nr;
        if (add) {
                /*
                 * Update the matching entry, if found, or add a new one
                 * if needed.
                 */
                if (cur) {
                        refcount_inc(&cur->count);
                        goto out;
                }

                if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
                        pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
                        /* Ensure the static call will never be enabled */
                        udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 1;
                } else {
                        cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
                        refcount_set(&cur->count, 1);
                        cur->gro_receive = up->gro_receive;
                }
        } else {
                /*
                 * The stack only cleans up tunnels it successfully added,
                 * so the lookup on removal should never fail.
                 */
                if (WARN_ON_ONCE(!cur))
                        goto out;

                if (!refcount_dec_and_test(&cur->count))
                        goto out;

                /* Avoid gaps, so that the enabled tunnel always has id 0 */
                *cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
        }

        if (udp_tunnel_gro_type_nr == 1) {
                static_call_update(udp_tunnel_gro_rcv,
                                   udp_tunnel_gro_types[0].gro_receive);
                static_branch_enable(&udp_tunnel_static_call);
        } else if (old_gro_type_nr == 1) {
                static_branch_disable(&udp_tunnel_static_call);
                static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
        }

out:
        mutex_unlock(&udp_tunnel_gro_type_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);

static void udp_tunnel_gro_init(void)
{
        mutex_init(&udp_tunnel_gro_type_lock);
}

static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
                                          struct list_head *head,
                                          struct sk_buff *skb)
{
        if (static_branch_likely(&udp_tunnel_static_call)) {
                if (unlikely(gro_recursion_inc_test(skb))) {
                        NAPI_GRO_CB(skb)->flush |= 1;
                        return NULL;
                }
                return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
        }
        return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
}

#else

static void udp_tunnel_gro_init(void) {}

static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
                                          struct list_head *head,
                                          struct sk_buff *skb)
{
        return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
}

#endif
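
/*
 * The dispatch scheme above in miniature (illustrative sketch with
 * hypothetical names): a static call starts out targeting a safe default
 * and is retargeted at runtime with static_call_update(), while a static
 * branch gates whether the patched fast path is taken at all.
 *
 *      DEFINE_STATIC_CALL(my_hook, my_default_fn);
 *      static DEFINE_STATIC_KEY_FALSE(my_hook_on);
 *
 *      static int my_dispatch(int arg)
 *      {
 *              if (static_branch_likely(&my_hook_on))
 *                      return static_call(my_hook)(arg); // patched direct call
 *              return my_slow_path(arg);                 // indirect fallback
 *      }
 *
 * udp_tunnel_update_gro_rcv() enables the pair only while exactly one
 * tunnel gro_receive type is registered, since a static call can have
 * only a single target.
 */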

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        netdev_features_t features,
        struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
                                             netdev_features_t features),
        __be16 new_protocol, bool is_ipv6)
{
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
        bool remcsum, need_csum, offload_csum, gso_partial;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct udphdr *uh = udp_hdr(skb);
        u16 mac_offset = skb->mac_header;
        __be16 protocol = skb->protocol;
        u16 mac_len = skb->mac_len;
        int udp_offset, outer_hlen;
        __wsum partial;
        bool need_ipsec;

        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;

        /* Adjust partial header checksum to negate old length.
         * We cannot rely on the value contained in uh->len as it is
         * possible that the actual value exceeds the boundaries of the
         * 16 bit length field due to the header being added outside of an
         * IP or IPv6 frame that was already limited to 64K - 1.
         */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
                partial = (__force __wsum)uh->len;
        else
                partial = (__force __wsum)htonl(skb->len);
        partial = csum_sub(csum_unfold(uh->check), partial);

        /* setup inner skb. */
        skb->encapsulation = 0;
        SKB_GSO_CB(skb)->encap_level = 0;
        __skb_pull(skb, tnl_hlen);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
        skb_set_transport_header(skb, skb_inner_transport_offset(skb));
        skb->mac_len = skb_inner_network_offset(skb);
        skb->protocol = new_protocol;

        need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
        skb->encap_hdr_csum = need_csum;

        remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
        skb->remcsum_offload = remcsum;

        need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
        /* Try to offload checksum if possible */
        offload_csum = !!(need_csum &&
                          !need_ipsec &&
                          (skb->dev->features &
                           (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
                                      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

        features &= skb->dev->hw_enc_features;
        if (need_csum)
                features &= ~NETIF_F_SCTP_CRC;

        /* The only checksum offload we care about from here on out is the
         * outer one so strip the existing checksum feature flags and
         * instead set the flag based on our outer checksum offload value.
         */
        if (remcsum) {
                features &= ~NETIF_F_CSUM_MASK;
                if (!need_csum || offload_csum)
                        features |= NETIF_F_HW_CSUM;
        }

        /* segment inner packet. */
        segs = gso_inner_segment(skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
                                     mac_len);
                goto out;
        }

        gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

        outer_hlen = skb_tnl_header_len(skb);
        udp_offset = outer_hlen - tnl_hlen;
        skb = segs;
        do {
                unsigned int len;

                if (remcsum)
                        skb->ip_summed = CHECKSUM_NONE;

                /* Set up inner headers if we are offloading inner checksum */
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_inner_headers(skb);
                        skb->encapsulation = 1;
                }

                skb->mac_len = mac_len;
                skb->protocol = protocol;

                __skb_push(skb, outer_hlen);
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, mac_len);
                skb_set_transport_header(skb, udp_offset);
                len = skb->len - udp_offset;
                uh = udp_hdr(skb);

                /* If we are only performing partial GSO the inner header
                 * will be using a length value equal to only one MSS sized
                 * segment instead of the entire frame.
                 */
                if (gso_partial && skb_is_gso(skb)) {
                        uh->len = htons(skb_shinfo(skb)->gso_size +
                                        SKB_GSO_CB(skb)->data_offset +
                                        skb->head - (unsigned char *)uh);
                } else {
                        uh->len = htons(len);
                }

                if (!need_csum)
                        continue;

                uh->check = ~csum_fold(csum_add(partial,
                                       (__force __wsum)htonl(len)));

                if (skb->encapsulation || !offload_csum) {
                        uh->check = gso_make_checksum(skb, ~uh->check);
                        if (uh->check == 0)
                                uh->check = CSUM_MANGLED_0;
                } else {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_start = skb_transport_header(skb) - skb->head;
                        skb->csum_offset = offsetof(struct udphdr, check);
                }
        } while ((skb = skb->next));
out:
        return segs;
}
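
/*
 * Worked example for the outer checksum handling above (illustrative):
 * uh->check starts as a pseudo-header checksum folded over the old length
 * (uh->len under SKB_GSO_PARTIAL, htonl(skb->len) otherwise). The old
 * length contribution is subtracted once up front:
 *
 *      partial   = csum_sub(csum_unfold(uh->check), old_len);
 *
 * and each segment then only needs a cheap per-segment add of its own
 * length before folding:
 *
 *      uh->check = ~csum_fold(csum_add(partial, (__force __wsum)htonl(len)));
 *
 * so the loop never recomputes the pseudo-header from scratch.
 */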

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6)
{
        const struct net_offload __rcu **offloads;
        __be16 protocol = skb->protocol;
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
                                             netdev_features_t features);

        rcu_read_lock();

        switch (skb->inner_protocol_type) {
        case ENCAP_TYPE_ETHER:
                protocol = skb->inner_protocol;
                gso_inner_segment = skb_mac_gso_segment;
                break;
        case ENCAP_TYPE_IPPROTO:
                offloads = is_ipv6 ? inet6_offloads : inet_offloads;
                ops = rcu_dereference(offloads[skb->inner_ipproto]);
                if (!ops || !ops->callbacks.gso_segment)
                        goto out_unlock;
                gso_inner_segment = ops->callbacks.gso_segment;
                break;
        default:
                goto out_unlock;
        }

        segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
                                        protocol, is_ipv6);

out_unlock:
        rcu_read_unlock();

        return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);

static void __udpv4_gso_segment_csum(struct sk_buff *seg,
                                     __be32 *oldip, __be32 *newip,
                                     __be16 *oldport, __be16 *newport)
{
        struct udphdr *uh;
        struct iphdr *iph;

        if (*oldip == *newip && *oldport == *newport)
                return;

        uh = udp_hdr(seg);
        iph = ip_hdr(seg);

        if (uh->check) {
                inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
                                         true);
                inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
                                         false);
                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        }
        *oldport = *newport;

        csum_replace4(&iph->check, *oldip, *newip);
        *oldip = *newip;
}

static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
{
        struct sk_buff *seg;
        struct udphdr *uh, *uh2;
        struct iphdr *iph, *iph2;

        seg = segs;
        uh = udp_hdr(seg);
        iph = ip_hdr(seg);

        if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
            (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
            (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
            (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
                return segs;

        while ((seg = seg->next)) {
                uh2 = udp_hdr(seg);
                iph2 = ip_hdr(seg);

                __udpv4_gso_segment_csum(seg,
                                         &iph2->saddr, &iph->saddr,
                                         &uh2->source, &uh->source);
                __udpv4_gso_segment_csum(seg,
                                         &iph2->daddr, &iph->daddr,
                                         &uh2->dest, &uh->dest);
        }

        return segs;
}
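
/*
 * The replace helpers above apply RFC 1624 incremental updates: for a
 * checksum HC covering a field m that changes to m', HC' = ~(~HC + ~m + m').
 * Worked example (illustrative): if a 16-bit field is incremented by one,
 * ~m + m' = ~m + m + 1 = 0xffff + 1, which wraps to 0x0001 after the
 * end-around carry, so the folded checksum simply decreases by one in
 * one's complement arithmetic.
 */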

static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
                                              netdev_features_t features,
                                              bool is_ipv6)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;

        skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
        if (IS_ERR(skb))
                return skb;

        udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);

        return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
}

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
                                  netdev_features_t features, bool is_ipv6)
{
        struct sock *sk = gso_skb->sk;
        unsigned int sum_truesize = 0;
        struct sk_buff *segs, *seg;
        struct udphdr *uh;
        unsigned int mss;
        bool copy_dtor;
        __sum16 check;
        __be16 newlen;

        mss = skb_shinfo(gso_skb)->gso_size;
        if (gso_skb->len <= sizeof(*uh) + mss)
                return ERR_PTR(-EINVAL);

        if (unlikely(skb_checksum_start(gso_skb) !=
                     skb_transport_header(gso_skb) &&
                     !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
                return ERR_PTR(-EINVAL);

        /* We don't know if the egress device can segment and checksum the
         * packet when IPv6 extension headers are present. Fall back to
         * software GSO.
         */
        if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
                features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);

        if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
                                                             mss);
                return NULL;
        }

        if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
                /* Detect modified geometry and pass those to skb_segment. */
                if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
                        return __udp_gso_segment_list(gso_skb, features,
                                                      is_ipv6);

                /* Setup csum, as fraglist skips this in udp4_gro_receive. */
                gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
                gso_skb->csum_offset = offsetof(struct udphdr, check);
                gso_skb->ip_summed = CHECKSUM_PARTIAL;

                uh = udp_hdr(gso_skb);
                if (is_ipv6)
                        uh->check = ~udp_v6_check(gso_skb->len,
                                                  &ipv6_hdr(gso_skb)->saddr,
                                                  &ipv6_hdr(gso_skb)->daddr, 0);
                else
                        uh->check = ~udp_v4_check(gso_skb->len,
                                                  ip_hdr(gso_skb)->saddr,
                                                  ip_hdr(gso_skb)->daddr, 0);
        }

        skb_pull(gso_skb, sizeof(*uh));

        /* clear destructor to avoid skb_segment assigning it to tail */
        copy_dtor = gso_skb->destructor == sock_wfree;
        if (copy_dtor) {
                gso_skb->destructor = NULL;
                gso_skb->sk = NULL;
        }

        segs = skb_segment(gso_skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                if (copy_dtor) {
                        gso_skb->destructor = sock_wfree;
                        gso_skb->sk = sk;
                }
                return segs;
        }

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder; both
         * cases return a GSO skb. So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        seg = segs;
        uh = udp_hdr(seg);

        /* preserve TX timestamp flags and TS key for first segment */
        skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
        skb_shinfo(seg)->tx_flags |=
                        (skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);

        /* compute checksum adjustment based on old length versus new */
        newlen = htons(sizeof(*uh) + mss);
        check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

        for (;;) {
                if (copy_dtor) {
                        seg->destructor = sock_wfree;
                        seg->sk = sk;
                        sum_truesize += seg->truesize;
                }

                if (!seg->next)
                        break;

                uh->len = newlen;
                uh->check = check;

                if (seg->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(seg, ~check);
                else
                        uh->check = gso_make_checksum(seg, ~check) ? :
                                    CSUM_MANGLED_0;

                seg = seg->next;
                uh = udp_hdr(seg);
        }

        /* last packet can be partial gso_size, account for that in checksum */
        newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
                       seg->data_len);
        check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

        uh->len = newlen;
        uh->check = check;

        if (seg->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(seg, ~check);
        else
                uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

        /* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
         * meaning. However, the check for bad offloads in the GSO stack
         * expects the latter, if the checksum was calculated in software.
         * To vouch for the segment skbs we actually need to set it on the
         * gso_skb.
         */
        if (gso_skb->ip_summed == CHECKSUM_NONE)
                gso_skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* update refcount for the packet */
        if (copy_dtor) {
                int delta = sum_truesize - gso_skb->truesize;

                /* In some pathological cases, delta can be negative.
                 * We need to either use refcount_add() or
                 * refcount_sub_and_test().
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
        }
        return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);
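
/*
 * Worked example for the length/checksum fixup loop above (illustrative
 * numbers): a gso_skb carrying 2500 payload bytes with gso_size 1000
 * yields segments of 1000, 1000 and 500 payload bytes, i.e. UDP lengths
 * 1008, 1008 and 508. The first two reuse "newlen"/"check" computed once
 * before the loop; the shorter tail segment is patched afterwards with
 * its own length via the same csum16_add()/csum16_sub() adjustment.
 */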

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
        __wsum csum;
        struct udphdr *uh;
        struct iphdr *iph;

        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
             (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
                segs = skb_udp_tunnel_segment(skb, features, false);
                goto out;
        }

        if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
                goto out;

        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto out;

        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                return __udp_gso_segment(skb, features, false);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        /* Do software UFO. Complete and fill in the UDP checksum as
         * HW cannot do checksum of UDP packets sent as multiple
         * IP fragments.
         */

        uh = udp_hdr(skb);
        iph = ip_hdr(skb);

        uh->check = 0;
        csum = skb_checksum(skb, 0, skb->len, 0);
        uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* If there is no outer header we can fake a checksum offload
         * due to the fact that we have already done the checksum in
         * software prior to segmenting the frame.
         */
        if (!skb->encap_hdr_csum)
                features |= NETIF_F_HW_CSUM;

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
        segs = skb_segment(skb, features);
out:
        return segs;
}

#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
                                               struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);
        struct sk_buff *pp = NULL;
        struct udphdr *uh2;
        struct sk_buff *p;
        unsigned int ulen;
        int ret = 0;
        int flush;

        /* requires non zero csum, for symmetry with GSO */
        if (!uh->check) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        /* Do not deal with padded or malicious packets, sorry! */
        ulen = ntohs(uh->len);
        if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }
        /* pull encapsulating udp header */
        skb_gro_pull(skb, sizeof(struct udphdr));

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = udp_hdr(p);

                /* Match ports only, as csum is always non zero */
                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
                        NAPI_GRO_CB(skb)->flush = 1;
                        return p;
                }

                flush = gro_receive_network_flush(uh, uh2, p);

                /* Terminate the flow on len mismatch or if it grows "too
                 * much". Under a small packet flood the GRO count could
                 * otherwise grow a lot, leading to excessive truesize
                 * values. On len mismatch merge the first packet shorter
                 * than gso_size, otherwise complete the GRO packet.
                 */
                if (ulen > ntohs(uh2->len) || flush) {
                        pp = p;
                } else {
                        if (NAPI_GRO_CB(skb)->is_flist) {
                                if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
                                        NAPI_GRO_CB(skb)->flush = 1;
                                        return NULL;
                                }
                                if ((skb->ip_summed != p->ip_summed) ||
                                    (skb->csum_level != p->csum_level)) {
                                        NAPI_GRO_CB(skb)->flush = 1;
                                        return NULL;
                                }
                                ret = skb_gro_receive_list(p, skb);
                        } else {
                                skb_gro_postpull_rcsum(skb, uh,
                                                       sizeof(struct udphdr));

                                ret = skb_gro_receive(p, skb);
                        }
                }

                if (ret || ulen != ntohs(uh2->len) ||
                    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
                        pp = p;

                return pp;
        }

        /* mismatch, but we never need to flush */
        return NULL;
}
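
/*
 * Userspace counterpart (illustrative sketch): the udp_test_bit(GRO_ENABLED)
 * check in udp_gro_receive() below routes packets into
 * udp_gro_receive_segment() when the application opts in per socket with
 * the UDP_GRO socket option; the coalesced segment size is then reported
 * as a UDP_GRO cmsg on each receive.
 *
 *      int one = 1;
 *
 *      setsockopt(fd, SOL_UDP, UDP_GRO, &one, sizeof(one));
 */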

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct udphdr *uh, struct sock *sk)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct udphdr *uh2;
        unsigned int off = skb_gro_offset(skb);
        int flush = 1;

        /* We can do L4 aggregation only if the packet can't land in a tunnel
         * otherwise we could corrupt the inner stream. Detecting such packets
         * cannot be foolproof and the aggregation might still happen in some
         * cases. Such packets should be caught in udp_unexpected_gso later.
         */
        NAPI_GRO_CB(skb)->is_flist = 0;
        if (!sk || !udp_sk(sk)->gro_receive) {
                /* If the packet was locally encapsulated in a UDP tunnel that
                 * wasn't detected above, do not GRO.
                 */
                if (skb->encapsulation)
                        goto out;

                if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
                        NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;

                if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
                    (sk && udp_test_bit(GRO_ENABLED, sk)) ||
                    NAPI_GRO_CB(skb)->is_flist)
                        return call_gro_receive(udp_gro_receive_segment, head, skb);

                /* no GRO, be sure to flush the current packet */
                goto out;
        }

        if (NAPI_GRO_CB(skb)->encap_mark ||
            (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
             NAPI_GRO_CB(skb)->csum_cnt == 0 &&
             !NAPI_GRO_CB(skb)->csum_valid))
                goto out;

        /* mark that this skb passed once through the tunnel gro layer */
        NAPI_GRO_CB(skb)->encap_mark = 1;

        flush = 0;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = (struct udphdr *)(p->data + off);

                /* Match ports, and require that the checksums are either
                 * both zero or both nonzero.
                 */
                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
                    (!uh->check ^ !uh2->check)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        pp = udp_tunnel_gro_rcv(sk, head, skb);

out:
        skb_gro_flush_final(skb, pp, flush);
        return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
                                        __be16 dport)
{
        const struct iphdr *iph = skb_gro_network_header(skb);
        struct net *net = dev_net_rcu(skb->dev);
        struct sock *sk;
        int iif, sdif;

        sk = udp_tunnel_sk(net, false);
        if (sk && dport == htons(sk->sk_num))
                return sk;

        inet_get_iif_sdif(skb, &iif, &sdif);

        return __udp4_lib_lookup(net, iph->saddr, sport,
                                 iph->daddr, dport, iif,
                                 sdif, net->ipv4.udp_table, NULL);
}
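
/*
 * Operational note (illustrative): the fraglist and forwarding GRO modes
 * checked in udp_gro_receive() above map to toggleable device features,
 * e.g. "ethtool -K eth0 rx-gro-list on" for NETIF_F_GRO_FRAGLIST and
 * "ethtool -K eth0 rx-udp-gro-forwarding on" for NETIF_F_GRO_UDP_FWD.
 */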

INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);
        struct sock *sk = NULL;
        struct sk_buff *pp;

        if (unlikely(!uh))
                goto flush;

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (NAPI_GRO_CB(skb)->flush)
                goto skip;

        if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
                                                 inet_gro_compute_pseudo))
                goto flush;
        else if (uh->check)
                skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
                                             inet_gro_compute_pseudo);
skip:
        NAPI_GRO_CB(skb)->is_ipv6 = 0;

        if (static_branch_unlikely(&udp_encap_needed_key))
                sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);

        pp = udp_gro_receive(head, skb, uh, sk);
        return pp;

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}

static int udp_gro_complete_segment(struct sk_buff *skb)
{
        struct udphdr *uh = udp_hdr(skb);

        skb->csum_start = (unsigned char *)uh - skb->head;
        skb->csum_offset = offsetof(struct udphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;

        if (skb->encapsulation)
                skb->inner_transport_header = skb->transport_header;

        return 0;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
                     udp_lookup_t lookup)
{
        __be16 newlen = htons(skb->len - nhoff);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
        struct sock *sk;
        int err;

        uh->len = newlen;

        sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
                                udp4_lib_lookup_skb, skb, uh->source, uh->dest);
        if (sk && udp_sk(sk)->gro_complete) {
                skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
                                        : SKB_GSO_UDP_TUNNEL;

                /* clear the encap mark, so that inner frag_list gro_complete
                 * can take place
                 */
                NAPI_GRO_CB(skb)->encap_mark = 0;

                /* Set encapsulation before calling into inner gro_complete()
                 * functions to make them set up the inner offsets.
                 */
                skb->encapsulation = 1;
                err = udp_sk(sk)->gro_complete(sk, skb,
                                               nhoff + sizeof(struct udphdr));
        } else {
                err = udp_gro_complete_segment(skb);
        }

        if (skb->remcsum_offload)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

        return err;
}
EXPORT_SYMBOL(udp_gro_complete);

INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
        const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
        const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

        /* do fraglist only if there is no outer UDP encap (or we already processed it) */
        if (NAPI_GRO_CB(skb)->is_flist && !NAPI_GRO_CB(skb)->encap_mark) {
                uh->len = htons(skb->len - nhoff);

                skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST | SKB_GSO_UDP_L4);
                skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

                __skb_incr_checksum_unnecessary(skb);

                return 0;
        }

        if (uh->check)
                uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
                                          iph->daddr, 0);

        return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

int __init udpv4_offload_init(void)
{
        net_hotdata.udpv4_offload = (struct net_offload) {
                .callbacks = {
                        .gso_segment = udp4_ufo_fragment,
                        .gro_receive = udp4_gro_receive,
                        .gro_complete = udp4_gro_complete,
                },
        };

        udp_tunnel_gro_init();
        return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
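
/*
 * Userspace counterpart for the transmit side (illustrative sketch): the
 * SKB_GSO_UDP_L4 path through udp4_ufo_fragment()/__udp_gso_segment() is
 * exercised by sending one large datagram with UDP_SEGMENT set, letting
 * the stack (or a NETIF_F_GSO_UDP_L4 capable NIC) split it into
 * gso_size-sized datagrams.
 *
 *      unsigned int gso_size = 1400;
 *
 *      setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *      send(fd, buf, 14000, 0); // received as ten 1400 byte datagrams
 */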