// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb_set_transport_header(skb, skb_inner_transport_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

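/**
 * skb_udp_tunnel_segment - segment a UDP-encapsulated GSO skb
 * @skb: GSO skb with the outer tunnel headers still in place
 * @features: features of the output device
 * @is_ipv6: true if the outer network header is IPv6
 *
 * Picks the GSO handler for the inner payload (skb_mac_gso_segment() for
 * ENCAP_TYPE_ETHER, or the matching inet/inet6 offload table entry for
 * ENCAP_TYPE_IPPROTO) and delegates to __skb_udp_tunnel_segment().
 */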
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);

static void __udpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 *newip,
				     __be16 *oldport, __be16 *newport)
{
	struct udphdr *uh;
	struct iphdr *iph;

	if (*oldip == *newip && *oldport == *newport)
		return;

	uh = udp_hdr(seg);
	iph = ip_hdr(seg);

	if (uh->check) {
		inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
					 true);
		inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
					 false);
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	}
	*oldport = *newport;

	csum_replace4(&iph->check, *oldip, *newip);
	*oldip = *newip;
}

static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	struct sk_buff *seg;
	struct udphdr *uh, *uh2;
	struct iphdr *iph, *iph2;

	seg = segs;
	uh = udp_hdr(seg);
	iph = ip_hdr(seg);

	if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
	    (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
	    (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
	    (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
		return segs;

	while ((seg = seg->next)) {
		uh2 = udp_hdr(seg);
		iph2 = ip_hdr(seg);

		__udpv4_gso_segment_csum(seg,
					 &iph2->saddr, &iph->saddr,
					 &uh2->source, &uh->source);
		__udpv4_gso_segment_csum(seg,
					 &iph2->daddr, &iph->daddr,
					 &uh2->dest, &uh->dest);
	}

	return segs;
}

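/* Turn a SKB_GSO_FRAGLIST skb back into individual packets. The trailing
 * fraglist members keep their own original headers, so for IPv4 their
 * addresses, ports and checksums are re-synced with the (possibly
 * rewritten, e.g. by NAT) head segment via __udpv4_gso_segment_list_csum().
 */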
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
					      netdev_features_t features,
					      bool is_ipv6)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;

	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);

	return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
}

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
		return __udp_gso_segment_list(gso_skb, features, is_ipv6);

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* preserve TX timestamp flags and TS key for first segment */
	skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
	skb_shinfo(seg)->tx_flags |=
			(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);

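/* GSO entry point for UDP over IPv4: tunnel skbs go through
 * skb_udp_tunnel_segment(), SKB_GSO_UDP_L4 skbs through __udp_gso_segment(),
 * and legacy UFO falls back to a software checksum followed by splitting
 * into IP fragments via skb_segment().
 */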
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return __udp_gso_segment(skb, features, false);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment().
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
					       struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;
	unsigned int ulen;
	int ret = 0;

	/* requires a non-zero csum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Do not deal with padded or malicious packets, sorry! */
	ulen = ntohs(uh->len);
	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non-zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
			NAPI_GRO_CB(skb)->flush = 1;
			return p;
		}

		/* Terminate the flow on len mismatch or if it grows "too
		 * much". Under small packet flood the GRO count could
		 * otherwise grow a lot, leading to excessive truesize values.
		 * On len mismatch merge the first packet shorter than
		 * gso_size, otherwise complete the GRO packet.
		 */
		if (ulen > ntohs(uh2->len)) {
			pp = p;
		} else {
			if (NAPI_GRO_CB(skb)->is_flist) {
				if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				if ((skb->ip_summed != p->ip_summed) ||
				    (skb->csum_level != p->csum_level)) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				ret = skb_gro_receive_list(p, skb);
			} else {
				skb_gro_postpull_rcsum(skb, uh,
						       sizeof(struct udphdr));

				ret = skb_gro_receive(p, skb);
			}
		}

		if (ret || ulen != ntohs(uh2->len) ||
		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
			pp = p;

		return pp;
	}

	/* mismatch, but we never need to flush */
	return NULL;
}

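/**
 * udp_gro_receive - common UDP GRO receive path
 * @head: list of packets GRO currently holds for this bucket
 * @skb: packet under consideration
 * @uh: UDP header of @skb
 * @sk: matching local socket, if any
 *
 * Performs plain L4 aggregation when no tunnel socket claims the packet;
 * otherwise validates the outer checksum and hands the packet to the
 * socket's gro_receive() callback (e.g. a vxlan or geneve socket).
 */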
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	/* We can do L4 aggregation only if the packet can't land in a tunnel;
	 * otherwise we could corrupt the inner stream.
	 */
	NAPI_GRO_CB(skb)->is_flist = 0;
	if (!sk || !udp_sk(sk)->gro_receive) {
		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;

		if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
		    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
			pp = call_gro_receive(udp_gro_receive_segment, head, skb);
		return pp;
	}

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require the checksums to be either both
		 * zero or both non-zero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
					__be16 dport)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), &udp_table, NULL);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sock *sk = NULL;
	struct sk_buff *pp;

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	rcu_read_lock();

	if (static_branch_unlikely(&udp_encap_needed_key))
		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);

	pp = udp_gro_receive(head, skb, uh, sk);
	rcu_read_unlock();
	return pp;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

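/* No tunnel socket claimed the merged packet: flag it as SKB_GSO_UDP_L4
 * with CHECKSUM_PARTIAL aimed at the UDP checksum field, so it can be
 * resegmented later instead of being delivered as one oversized datagram.
 */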
static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
	return 0;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	struct sock *sk;
	int err;

	uh->len = newlen;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* clear the encap mark, so that inner frag_list gro_complete
		 * can take place
		 */
		NAPI_GRO_CB(skb)->encap_mark = 0;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	} else {
		err = udp_gro_complete_segment(skb);
	}
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

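/* IPv4 GRO completion: fraglist packets are finalized here (no outer encap
 * left to process); everything else gets a fresh pseudo-header checksum and
 * is passed on to udp_gro_complete().
 */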
INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	/* do fraglist only if there is no outer UDP encap (or we already processed it) */
	if (NAPI_GRO_CB(skb)->is_flist && !NAPI_GRO_CB(skb)->encap_mark) {
		uh->len = htons(skb->len - nhoff);

		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
				skb->csum_level++;
		} else {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;
		}

		return 0;
	}

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}