Lines matching full:skb (GRO receive engine, net/core/gro.c)
Only the matching lines are shown; the number leading each line is its line number in the kernel source file.
92 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
94 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
95 unsigned int offset = skb_gro_offset(skb);
96 unsigned int headlen = skb_headlen(skb);
97 unsigned int len = skb_gro_len(skb);
109 if (p->pp_recycle != skb->pp_recycle)
113 NAPI_GRO_CB(skb)->flush))
117 if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
124 segs = NAPI_GRO_CB(skb)->count;
151 new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
152 delta_truesize = skb->truesize - new_truesize;
154 skb->truesize = new_truesize;
155 skb->len -= skb->data_len;
156 skb->data_len = 0;
158 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
160 } else if (skb->head_frag) {
163 struct page *page = virt_to_head_page(skb->head);
170 first_offset = skb->data -
182 delta_truesize = skb->truesize - new_truesize;
183 skb->truesize = new_truesize;
184 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
190 skb->destructor = NULL;
191 skb->sk = NULL;
192 delta_truesize = skb->truesize;
198 skb->data_len -= eat;
199 skb->len -= eat;
203 __skb_pull(skb, offset);
206 skb_shinfo(p)->frag_list = skb;
208 NAPI_GRO_CB(p)->last->next = skb;
209 NAPI_GRO_CB(p)->last = skb;
210 __skb_header_release(skb);
223 NAPI_GRO_CB(skb)->same_flow = 1;
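skb_gro_receive() merges a newly arrived skb into the held packet p. The matches above cover its three strategies: when the payload already sits entirely in skb's page frags (headlen <= offset), the frag descriptors are copied into p and skb is marked NAPI_GRO_FREE; when the linear head is itself page-backed (skb->head_frag), the head page is stolen and appended as a frag (NAPI_GRO_FREE_STOLEN_HEAD); otherwise skb is chained whole onto p's frag_list. In every case the truesize delta is accounted to p and same_flow is set so the caller knows skb was absorbed. A hedged sketch of that decision logic (not the kernel source; the real function also enforces gro_max_size and segment-count limits):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/gro.h>

/* Simplified sketch of skb_gro_receive()'s merge choice. */
static int gro_merge_sketch(struct sk_buff *p, struct sk_buff *skb)
{
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;	/* never mix page-pool and regular heads */

	if (headlen <= offset) {
		/* payload fully in skb's frags: copy the frag
		 * descriptors into p, then free skb's head
		 */
	} else if (skb->head_frag) {
		/* page-backed linear head: append the head page
		 * itself as a frag of p and steal it
		 */
	} else {
		/* fallback: chain the whole skb on p's frag_list */
	}

	NAPI_GRO_CB(skb)->same_flow = 1;	/* skb was absorbed into p */
	return 0;
}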
227 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
229 if (unlikely(p->len + skb->len >= 65536))
233 skb_shinfo(p)->frag_list = skb;
235 NAPI_GRO_CB(p)->last->next = skb;
237 skb_pull(skb, skb_gro_offset(skb));
239 NAPI_GRO_CB(p)->last = skb;
241 p->data_len += skb->len;
244 skb->destructor = NULL;
245 skb->sk = NULL;
246 p->truesize += skb->truesize;
247 p->len += skb->len;
249 NAPI_GRO_CB(skb)->same_flow = 1;
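skb_gro_receive_list() is the fraglist variant used by UDP (and fraglist TCP) GRO: segments are kept intact and chained on p's frag_list, and the aggregate is capped below 64 KiB because the IP length fields are 16-bit. A hedged sketch of how a handler picks between the two helpers, modeled on udp_gro_receive_segment():

#include <linux/skbuff.h>
#include <net/gro.h>

/* is_flist is set when the aggregate must stay resegmentable as a
 * chain of the original packets (fraglist GRO).
 */
static int gro_coalesce_sketch(struct sk_buff *p, struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->is_flist)
		return skb_gro_receive_list(p, skb); /* keep segments whole */

	return skb_gro_receive(p, skb);	/* merge payload into p's frags */
}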
254 static void gro_complete(struct gro_node *gro, struct sk_buff *skb)
258 __be16 type = skb->protocol;
261 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
263 if (NAPI_GRO_CB(skb)->count == 1) {
264 skb_shinfo(skb)->gso_size = 0;
275 skb, 0);
282 kfree_skb(skb);
287 gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count);
293 struct sk_buff *skb, *p;
295 list_for_each_entry_safe_reverse(skb, p, head, list) {
296 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
298 skb_list_del_init(skb);
299 gro_complete(gro, skb);
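This flush walk releases held packets oldest-first (new arrivals go to the head of the list, hence the _reverse iterator). When flush_old is set, an skb whose ->age still equals the current jiffies was merged during this tick and is kept back, giving active flows a chance to keep growing. A hedged sketch of that freshness test:

#include <linux/jiffies.h>
#include <net/gro.h>

/* Sketch of the "flush old only" rule used above: an skb stamped
 * with the current jiffies value arrived within this tick.
 */
static bool gro_skb_is_fresh(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->age == jiffies;
}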
325 static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
333 skb_ext = skb_ext_find(skb, TC_SKB_EXT);
344 const struct sk_buff *skb)
346 unsigned int maclen = skb->dev->hard_header_len;
347 u32 hash = skb_get_hash_raw(skb);
358 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
359 diffs |= p->vlan_all ^ skb->vlan_all;
360 diffs |= skb_metadata_differs(p, skb);
363 skb_mac_header(skb));
366 skb_mac_header(skb),
374 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
375 diffs |= p->sk != skb->sk;
376 diffs |= skb_metadata_dst_cmp(p, skb);
377 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
379 diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
380 diffs |= __psp_skb_coalesce_diff(skb, p, diffs);
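gro_list_prepare() pre-screens every held packet in the hash bucket against the new skb: device, VLAN tag, skb metadata and the MAC header must all match, and the costlier comparisons (socket, dst metadata, conntrack, tc and PSP extensions) run only when either side has slow_gro set. Any nonzero bit left in diffs disqualifies that candidate. A hedged sketch of the cheap core of the comparison (the real code special-cases maclen == ETH_HLEN):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hedged sketch of the per-candidate flow test; the real code also
 * folds in metadata, slow_gro and extension differences as above.
 */
static bool gro_flow_matches_sketch(const struct sk_buff *p,
				    const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	unsigned long diffs;

	diffs  = (unsigned long)p->dev ^ (unsigned long)skb->dev;
	diffs |= p->vlan_all ^ skb->vlan_all;
	if (!diffs)
		diffs = memcmp(skb_mac_header(p), skb_mac_header(skb),
			       maclen);
	return diffs == 0;
}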
387 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
393 NAPI_GRO_CB(skb)->network_offset = 0;
394 NAPI_GRO_CB(skb)->data_offset = 0;
395 headlen = skb_headlen(skb);
396 NAPI_GRO_CB(skb)->frag0 = skb->data;
397 NAPI_GRO_CB(skb)->frag0_len = headlen;
401 pinfo = skb_shinfo(skb);
407 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
408 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
410 skb->end - skb->tail);
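skb_gro_reset_offset() arms the "frag0" fast path: if the packet's headers live in the first page frag rather than the linear area, GRO handlers can parse them directly from frag0 without pulling. frag0_len is clamped to both the frag size and the head's tailroom so a later gro_pull_from_frag0() can never overflow the linear area. A typical consumer, sketched after inet_gro_receive():

#include <linux/ip.h>
#include <net/gro.h>

/* Hedged sketch: fetch the IPv4 header through the frag0 fast path.
 * skb_gro_header() returns a pointer into frag0 when the bytes are
 * there, and falls back to a slow linearizing pull otherwise.
 */
static const struct iphdr *gro_parse_iph_sketch(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);

	return skb_gro_header(skb, off + sizeof(struct iphdr), off);
}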
414 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
416 struct skb_shared_info *pinfo = skb_shinfo(skb);
418 BUG_ON(skb->end - skb->tail < grow);
420 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
422 skb->data_len -= grow;
423 skb->tail += grow;
429 skb_frag_unref(skb, 0);
435 static void gro_try_pull_from_frag0(struct sk_buff *skb)
437 int grow = skb_gro_offset(skb) - skb_headlen(skb);
440 gro_pull_from_frag0(skb, grow);
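gro_try_pull_from_frag0() measures how far the GRO handlers advanced into frag0 beyond what the linear area covers; any excess must be copied into the head (gro_pull_from_frag0()) before the skb is held or delivered, because the rest of the stack expects parsed headers to be linear. A hedged worked example of the arithmetic:

/* Worked example (hedged): headers parsed entirely out of frag0.
 * With an empty linear area and ETH(14) + IPv4(20) + TCP(20)
 * consumed by the receive handlers:
 *
 *	skb_headlen(skb)    == 0
 *	skb_gro_offset(skb) == 54
 *	grow                == 54 - 0 == 54
 *
 * so 54 header bytes are memcpy'd from frag0 into the linear area
 * and the first frag is trimmed (or dropped) by the same amount.
 */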
456 * SKB to the chain.
463 struct sk_buff *skb)
465 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
469 __be16 type = skb->protocol;
474 if (netif_elide_gro(skb->dev))
477 gro_list_prepare(&gro_list->list, skb);
488 skb_set_network_header(skb, skb_gro_offset(skb));
489 skb_reset_mac_len(skb);
493 *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
494 NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
495 NAPI_GRO_CB(skb)->count = 1;
496 if (unlikely(skb_is_gso(skb))) {
497 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
499 if (!skb_is_gso_tcp(skb) ||
500 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
501 NAPI_GRO_CB(skb)->flush = 1;
505 switch (skb->ip_summed) {
507 NAPI_GRO_CB(skb)->csum = skb->csum;
508 NAPI_GRO_CB(skb)->csum_valid = 1;
511 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
517 &gro_list->list, skb);
526 same_flow = NAPI_GRO_CB(skb)->same_flow;
527 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
538 if (NAPI_GRO_CB(skb)->flush)
546 /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
547 gro_try_pull_from_frag0(skb);
548 NAPI_GRO_CB(skb)->age = jiffies;
549 NAPI_GRO_CB(skb)->last = skb;
550 if (!skb_is_gso(skb))
551 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
552 list_add(&skb->list, &gro_list->list);
566 gro_try_pull_from_frag0(skb);
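dev_gro_receive() is the engine's entry point: it hashes the flow into one of GRO_HASH_BUCKETS lists, pre-screens candidates with gro_list_prepare(), initializes the napi_gro_cb state (count, flush, checksum), then dispatches to the protocol offload, whose gro_receive either merges the skb into a held packet (same_flow) or nominates it to be held or flushed. A hedged sketch of the dispatch tail seen at line 517 above:

#include <linux/indirect_call_wrapper.h>
#include <linux/netdevice.h>
#include <net/gro.h>

/* Hedged sketch of the protocol dispatch inside dev_gro_receive():
 * find the packet_offload registered for skb->protocol and run its
 * gro_receive callback over the per-bucket list of held packets.
 */
static struct sk_buff *gro_dispatch_sketch(struct list_head *offloads,
					   struct list_head *bucket,
					   struct sk_buff *skb)
{
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offloads, list) {
		if (ptype->type != skb->protocol ||
		    !ptype->callbacks.gro_receive)
			continue;
		return INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					  ipv6_gro_receive,
					  inet_gro_receive,
					  bucket, skb);
	}
	return NULL;	/* no offload registered: cannot be GROed */
}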
598 static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb,
603 gro_normal_one(gro, skb, 1);
607 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
608 napi_skb_free_stolen_head(skb);
609 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
610 __kfree_skb(skb);
612 __napi_kfree_skb(skb, SKB_CONSUMED);
624 gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb)
628 __skb_mark_napi_id(skb, gro);
629 trace_napi_gro_receive_entry(skb);
631 skb_gro_reset_offset(skb, 0);
633 ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb));
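gro_receive_skb() (the body of napi_gro_receive()) is what drivers call per received buffer: it tags the skb with the NAPI id, resets the GRO offsets, runs dev_gro_receive() and translates the verdict in gro_skb_finish(). A hedged sketch of the usual driver-side call site; the mydrv_* names, including the mydrv_next_rx_skb() helper, are hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <net/gro.h>

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		/* hypothetical helper: next completed RX buffer as an skb */
		struct sk_buff *skb = mydrv_next_rx_skb(napi);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);	/* enters the path above */
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}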
640 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
642 if (unlikely(skb->pfmemalloc)) {
643 consume_skb(skb);
646 __skb_pull(skb, skb_headlen(skb));
648 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
649 __vlan_hwaccel_clear_tag(skb);
650 skb->dev = napi->dev;
651 skb->skb_iif = 0;
654 skb->pkt_type = PACKET_HOST;
656 skb->encapsulation = 0;
657 skb->ip_summed = CHECKSUM_NONE;
658 skb_shinfo(skb)->gso_type = 0;
659 skb_shinfo(skb)->gso_size = 0;
660 if (unlikely(skb->slow_gro)) {
661 skb_orphan(skb);
662 skb_ext_reset(skb);
663 nf_reset_ct(skb);
664 skb->slow_gro = 0;
667 napi->skb = skb;
672 struct sk_buff *skb = napi->skb;
674 if (!skb) {
675 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
676 if (skb) {
677 napi->skb = skb;
678 skb_mark_napi_id(skb, napi);
681 return skb;
686 struct sk_buff *skb,
692 __skb_push(skb, ETH_HLEN);
693 skb->protocol = eth_type_trans(skb, skb->dev);
695 gro_normal_one(&napi->gro, skb, 1);
699 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
700 napi_skb_free_stolen_head(skb);
702 napi_reuse_skb(napi, skb);
715 * We copy ethernet header into skb->data to have a common layout.
719 struct sk_buff *skb = napi->skb;
723 napi->skb = NULL;
725 skb_reset_mac_header(skb);
726 skb_gro_reset_offset(skb, hlen);
728 if (unlikely(!skb_gro_may_pull(skb, hlen))) {
729 eth = skb_gro_header_slow(skb, hlen, 0);
731 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
733 napi_reuse_skb(napi, skb);
737 eth = (const struct ethhdr *)skb->data;
739 if (NAPI_GRO_CB(skb)->frag0 != skb->data)
740 gro_pull_from_frag0(skb, hlen);
742 NAPI_GRO_CB(skb)->frag0 += hlen;
743 NAPI_GRO_CB(skb)->frag0_len -= hlen;
745 __skb_pull(skb, hlen);
752 skb->protocol = eth->h_proto;
754 return skb;
760 struct sk_buff *skb = napi_frags_skb(napi);
762 trace_napi_gro_frags_entry(skb);
764 ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb));
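The frags path lets a driver avoid building a fresh skb per packet: napi_get_frags() returns a cached shell (napi->skb), the driver attaches its RX pages as frags, and napi_gro_frags() recovers the Ethernet header from frag0 (napi_frags_skb()) before handing the result to dev_gro_receive(); a merged-and-freed shell is recycled through napi_reuse_skb(). A hedged sketch of the driver-side pattern; page/off/len/truesize are assumed to come from the driver's RX ring:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro.h>

static void mydrv_rx_frag(struct napi_struct *napi, struct page *page,
			  unsigned int off, unsigned int len,
			  unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;	/* out of memory: drop this buffer */

	skb_fill_page_desc(skb, 0, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += truesize;

	napi_gro_frags(napi);	/* consumes or recycles the shell */
}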
774 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
779 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
781 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
782 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
785 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
786 !skb->csum_complete_sw)
787 netdev_rx_csum_fault(skb->dev, skb);
790 NAPI_GRO_CB(skb)->csum = wsum;
791 NAPI_GRO_CB(skb)->csum_valid = 1;
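__skb_gro_checksum_complete() software-verifies the checksum across the whole GRO region, folding the payload sum into the pseudo-header sum the caller stashed in NAPI_GRO_CB(skb)->csum; on success the raw sum is saved back and csum_valid is set so later stages can reuse it. Protocol handlers normally reach it through the skb_gro_checksum_validate() macro, sketched here after the pattern in tcp4_gro_receive():

#include <net/gro.h>
#include <net/ip.h>

/* Hedged sketch: validate the TCP checksum over the GRO region and
 * flush the flow on failure instead of trying to coalesce.
 */
static bool gro_csum_ok_sketch(struct sk_buff *skb)
{
	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;	/* bad csum: don't merge */
		return false;
	}
	return true;
}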
813 struct sk_buff *skb, *n;
816 list_for_each_entry_safe(skb, n, &gro->hash[i].list, list)
817 kfree_skb(skb);
825 list_for_each_entry_safe(skb, n, &gro->rx_list, list)
826 kfree_skb(skb);