// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/**
 *	skb_eth_gso_segment - segmentation handler for ethernet protocols.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (p->protocol != htons(ETH_P_IPV6) ||
		    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
		    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}


static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * The youngest packets are at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP at the moment. */
		if (!skb_is_gso_tcp(skb))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
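/*
 * Usage sketch (illustrative only, not part of this file): a protocol module
 * typically registers its GSO/GRO callbacks once at init time through
 * dev_add_offload() (defined above) and tears them down with
 * dev_remove_offload(), which sleeps in synchronize_net().  "foo",
 * ETH_P_FOO and the foo_* callbacks below are hypothetical placeholders;
 * real registrations live in the per-protocol code (for example the IPv4
 * packet_offload set up in net/ipv4/af_inet.c), not in this file.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),		// hypothetical protocol ID
 *		.callbacks = {
 *			.gso_segment	= foo_gso_segment,
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		// Sleeps; after return no CPU can still be using the callbacks.
 *		dev_remove_offload(&foo_offload);
 *	}
 */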