// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
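
/* A minimal registration sketch (illustrative only; the foo_* names are
 * hypothetical): a protocol registers a &struct packet_offload once at init
 * time so the GRO/GSO paths can find its callbacks by ethertype.
 *
 *	static struct packet_offload foo_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = foo_gso_segment,
 *			.gro_receive = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_packet_offload);
 *		return 0;
 *	}
 *	fs_initcall(foo_offload_init);
 */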

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;

	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}
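
	/* Fragment-level merging was not possible: strip the headers that are
	 * already represented in p and chain the remaining skb onto p via its
	 * frag_list instead.
	 */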
	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

/* Hand the fully aggregated skb to the protocol's gro_complete callback to
 * finalize its headers, then queue it for delivery to the stack.
 */
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

/* Mark which of the packets already held in this bucket belong to the same
 * flow as @skb.
 */
static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

/* Set up the frag0 fast path: when the skb has no linear data, GRO can read
 * the headers directly out of the first page fragment.
 */
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

/* Copy @grow bytes of headers from frag0 into the skb linear area. */
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}
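
/* Core GRO receive path for one skb: find the hash bucket for its flow, let
 * the matching protocol's gro_receive callback try to merge it into a packet
 * already held there, and report whether the skb was merged, held, consumed
 * or must take the normal receive path.
 */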
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					&gro_list->list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
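
	/* The callback may return a held packet whose aggregation is now
	 * over (e.g. it became full or hit a flush condition); complete and
	 * deliver it before deciding what to do with the current skb.
	 */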
	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}
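
/* Illustrative driver-side sketch (hypothetical variables): a driver built on
 * the frags API grabs the preallocated skb, attaches the received page
 * fragment(s) to it and then hands the result to GRO.
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 */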

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);