// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
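
/* Editorial sketch of how a protocol module would typically use the pair
 * of calls above from its init/exit paths. Everything named "foo" below
 * is hypothetical; only packet_offload, its fields, and the two exported
 * functions are real.
 *
 *	static struct packet_offload foo_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_TEB),	// example protocol type
 *		.priority = 10,
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_packet_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_packet_offload);
 *	}
 */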

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
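
/* Editorial note on usage (see tcp_gro_receive() for a real caller): a
 * transport-level gro_receive callback that has found a held packet @p
 * matching @skb calls skb_gro_receive(p, skb). A zero return means @skb
 * was merged, either by splicing frags above or by appending to the
 * frag_list in the merge: path; a negative return (-E2BIG, -ETOOMANYREFS)
 * means the pair could not be coalesced, and the caller typically flushes
 * the flow instead.
 */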

int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}
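
/* Worked example of the ffs() walk in napi_gro_flush() below (an
 * illustration, relying only on the kernel's 1-based ffs() semantics):
 * with gro_bitmask == 0b1010, base starts at ~0U (i.e. -1). ffs() returns
 * 2, so the mask is shifted to 0b10, base becomes 1, and bucket 1 is
 * flushed. ffs() then returns 2 again, the mask becomes 0, base becomes 3,
 * and bucket 3 is flushed, after which the loop terminates.
 */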

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
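
/* Illustrative numbers for the helper below (example values only, not
 * taken from this file): on a header-split RX path skb_headlen() can be
 * 0 with all data in frags[0]. If the protocol layers then advance the
 * GRO offset past, say, a 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP
 * header, grow is 54 - 0 = 54, and those 54 header bytes are copied out
 * of frag0 into the linear area by gro_pull_from_frag0() so later code
 * can dereference them directly.
 */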

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}
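
/* Editorial summary of dev_gro_receive() outcomes, derived from the code
 * below: GRO_NORMAL hands the skb to the regular receive path, GRO_HELD
 * parks it on a hash bucket awaiting more segments, GRO_MERGED and
 * GRO_MERGED_FREE mean it was coalesced into a held packet (the latter
 * when its memory can also be released), and GRO_CONSUMED means a
 * protocol callback took ownership (signalled via -EINPROGRESS).
 */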

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
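
/* Illustrative driver-side usage (a sketch under assumed names; "foo" and
 * foo_rx_next() are hypothetical): a NAPI poll loop hands each completed
 * RX buffer to GRO instead of calling netif_receive_skb() directly:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = foo_rx_next(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */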

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
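
/* Illustrative driver-side usage of the frags API above (a sketch under
 * assumed names; foo_rx_page(), ring, offset and frame_len are
 * hypothetical): drivers that receive into pages rather than linear
 * buffers pair napi_get_frags() with napi_gro_frags(), which consumes
 * napi->skb:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;		// allocation failure: drop this frame
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 *			foo_rx_page(ring), offset, frame_len, PAGE_SIZE);
 *	skb_record_rx_queue(skb, ring->queue_index);
 *	napi_gro_frags(napi);	// eth header is parsed in napi_frags_skb()
 */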