| /linux/net/tls/ |
| tls_strp.c |
     32  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_anchor_free() [local]
     34  DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);  in tls_strp_anchor_free()
     36  shinfo->frag_list = NULL;  in tls_strp_anchor_free()
    147  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_msg_hold() [local]
    152  WARN_ON_ONCE(!shinfo->nr_frags);  in tls_strp_msg_hold()
    167  iter = shinfo->frag_list;  in tls_strp_msg_hold()
    194  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_flush_anchor_copy() [local]
    197  DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);  in tls_strp_flush_anchor_copy()
    199  for (i = 0; i < shinfo->nr_frags; i++)  in tls_strp_flush_anchor_copy()
    200  __skb_frag_unref(&shinfo->frags[i], false);  in tls_strp_flush_anchor_copy()
    [all …]
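The hits above share one pattern: the TLS strparser assumes exclusive ownership of its anchor skb (dataref must be 1), so it can release fragment references and detach the frag_list without cloning. A minimal kernel-context sketch of that pattern; the helper name is hypothetical and the nr_frags reset is my assumption, not shown in the hits:

```c
/* Sketch only: assumes the caller holds the sole data reference. */
static void flush_anchor(struct sk_buff *anchor)
{
        struct skb_shared_info *shinfo = skb_shinfo(anchor);
        int i;

        DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i], false); /* drop page refs */
        shinfo->nr_frags = 0;          /* assumption: reset after unref */
        shinfo->frag_list = NULL;      /* detach chained skbs */
}
```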
|
| /linux/net/core/ |
| gso.c |
    145  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_gso_transport_seglen() [local]
    152  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))  in skb_gso_transport_seglen()
    154  } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {  in skb_gso_transport_seglen()
    158  } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {  in skb_gso_transport_seglen()
    165  return thlen + shinfo->gso_size;  in skb_gso_transport_seglen()
    226  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_gso_size_check() [local]
    229  if (shinfo->gso_size != GSO_BY_FRAGS)  in skb_gso_size_check()
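skb_gso_transport_seglen() answers "how long will each segment be on the wire": the transport header length plus gso_size bytes of payload. A simplified sketch of the TCP branch visible above (hypothetical helper; the real function also handles tunnels and SKB_GSO_UDP_L4):

```c
/* Sketch: per-segment transport length for a TCP GSO skb. */
static unsigned int gso_tcp_seglen(const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int thlen = 0;

        if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
                thlen = tcp_hdrlen(skb);   /* header incl. TCP options */

        return thlen + shinfo->gso_size;   /* header + payload per segment */
}
```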
|
| gro.c |
    642  struct skb_shared_info *shinfo;  in napi_reuse_skb() [local]
    661  shinfo = skb_shinfo(skb);  in napi_reuse_skb()
    662  shinfo->gso_type = 0;  in napi_reuse_skb()
    663  shinfo->gso_size = 0;  in napi_reuse_skb()
    664  shinfo->hwtstamps.hwtstamp = 0;  in napi_reuse_skb()
|
| skbuff.c |
     373  struct skb_shared_info *shinfo;  in __finalize_skb_around() [local]
     388  shinfo = skb_shinfo(skb);  in __finalize_skb_around()
     389  memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));  in __finalize_skb_around()
     390  atomic_set(&shinfo->dataref, 1);  in __finalize_skb_around()
    1044  struct skb_shared_info *shinfo;  in skb_pp_frag_ref() [local]
    1051  shinfo = skb_shinfo(skb);  in skb_pp_frag_ref()
    1053  for (i = 0; i < shinfo->nr_frags; i++) {  in skb_pp_frag_ref()
    1054  head_netmem = netmem_compound_head(shinfo->frags[i].netmem);  in skb_pp_frag_ref()
    1086  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_release_data() [local]
    1089  if (!skb_data_unref(skb, shinfo))  in skb_release_data()
    [all …]
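Lines 389-390 show the shared-info initialization idiom: one memset that stops right before dataref, then seed the refcount. It works because the fields that must start zeroed are laid out before dataref in struct skb_shared_info. A sketch of just that idiom (hypothetical helper name):

```c
/* Sketch: the shared-info init idiom from __finalize_skb_around(). */
static void shinfo_init(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* Zero everything up to, but not including, dataref ... */
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        /* ... then hand out the first data reference. */
        atomic_set(&shinfo->dataref, 1);
}
```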
|
| filter.c |
    3349  struct skb_shared_info *shinfo = skb_shinfo(skb);  in bpf_skb_proto_4_to_6() [local]
    3352  if (shinfo->gso_type & SKB_GSO_TCPV4) {  in bpf_skb_proto_4_to_6()
    3353  shinfo->gso_type &= ~SKB_GSO_TCPV4;  in bpf_skb_proto_4_to_6()
    3354  shinfo->gso_type |= SKB_GSO_TCPV6;  in bpf_skb_proto_4_to_6()
    3379  struct skb_shared_info *shinfo = skb_shinfo(skb);  in bpf_skb_proto_6_to_4() [local]
    3382  if (shinfo->gso_type & SKB_GSO_TCPV6) {  in bpf_skb_proto_6_to_4()
    3383  shinfo->gso_type &= ~SKB_GSO_TCPV6;  in bpf_skb_proto_6_to_4()
    3384  shinfo->gso_type |= SKB_GSO_TCPV4;  in bpf_skb_proto_6_to_4()
    3587  struct skb_shared_info *shinfo = skb_shinfo(skb);  in bpf_skb_net_grow() [local]
    3590  shinfo->gso_type |= gso_type;  in bpf_skb_net_grow()
    [all …]
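When bpf_skb_proto_4_to_6() rewrites the network header from IPv4 to IPv6, a GSO skb must also have its segmentation type retargeted, otherwise the stack would later try to build IPv4 segments from an IPv6 packet. A sketch of the type flip shown above (hypothetical helper; the real code also adjusts gso_size for the header delta):

```c
/* Sketch: retarget the GSO type after an IPv4 -> IPv6 conversion. */
static void gso_flip_4_to_6(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (shinfo->gso_type & SKB_GSO_TCPV4) {
                shinfo->gso_type &= ~SKB_GSO_TCPV4;
                shinfo->gso_type |= SKB_GSO_TCPV6;
        }
}
```

The 6-to-4 path at lines 3382-3384 is the mirror image of the same flip.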
|
| dev.c |
    3959  struct skb_shared_info *shinfo;  in validate_xmit_unreadable_skb() [local]
    3968  shinfo = skb_shinfo(skb);  in validate_xmit_unreadable_skb()
    3970  if (shinfo->nr_frags > 0) {  in validate_xmit_unreadable_skb()
    3971  niov = netmem_to_net_iov(skb_frag_netmem(&shinfo->frags[0]));  in validate_xmit_unreadable_skb()
    4074  struct skb_shared_info *shinfo = skb_shinfo(skb);  in qdisc_pkt_len_segs_init() [local]
    4078  if (!shinfo->gso_size) {  in qdisc_pkt_len_segs_init()
    4083  qdisc_skb_cb(skb)->pkt_segs = gso_segs = shinfo->gso_segs;  in qdisc_pkt_len_segs_init()
    4098  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {  in qdisc_pkt_len_segs_init()
    4106  } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {  in qdisc_pkt_len_segs_init()
    4114  if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {  in qdisc_pkt_len_segs_init()
    [all …]
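qdisc_pkt_len_segs_init() caches a segment count for the qdisc layer: a non-GSO packet is one segment, a trusted GSO packet reports shinfo->gso_segs, and SKB_GSO_DODGY sources get re-checked (line 4114). A reduced sketch of the happy path only (hypothetical helper; the real function also estimates per-segment header bytes by GSO type):

```c
/* Sketch: segment count as the qdisc layer sees it (happy path). */
static u16 pkt_segs(const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (!shinfo->gso_size)          /* not GSO: one packet, one segment */
                return 1;

        return shinfo->gso_segs;        /* filled in by the GSO producer */
}
```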
|
| /linux/tools/testing/selftests/kvm/x86/ |
| xen_shinfo_test.c |
    381  static struct shared_info *shinfo;  [variable]
    409  .u.shared_info.hva = (unsigned long)shinfo  in juggle_shinfo_state()
    461  shinfo = addr_gpa2hva(vm, SHINFO_VADDR);  in main()
    501  ha.u.shared_info.hva = (unsigned long)shinfo;  in main()
    514  struct pvclock_wall_clock wc_copy = shinfo->wc;  in main()
    515  void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);  in main()
    516  TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");  in main()
    517  shinfo->wc = wc_copy;  in main()
    698  shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;  in main()
    707  shinfo->evtchn_pending[0] = 0;  in main()
    [all …]
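Lines 514-517 use a neat trick: to wipe the shared-info page in place, the test saves the wallclock field, maps fresh zero pages over the same virtual address with MAP_FIXED, then copies the wallclock back. A standalone userspace sketch of that remap idiom; the struct and names here are stand-ins, not the test's types:

```c
#include <string.h>
#include <sys/mman.h>

/* Hypothetical stand-in for the field being preserved (shinfo->wc). */
struct wc_stub { unsigned int version, sec, nsec; };

/* Sketch: wipe a live page via MAP_FIXED but keep one field intact. */
static int remap_keep_wc(void *page, size_t page_size, int zero_fd)
{
        struct wc_stub saved;

        memcpy(&saved, page, sizeof(saved));            /* save wc      */
        if (mmap(page, page_size, PROT_READ | PROT_WRITE,
                 MAP_FIXED | MAP_PRIVATE, zero_fd, 0) != page)
                return -1;                              /* must land here */
        memcpy(page, &saved, sizeof(saved));            /* restore wc   */
        return 0;
}
```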
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| en_tx.c |
    603  const struct skb_shared_info *shinfo,  in is_inline() [argument]
    611  if (shinfo->nr_frags == 1) {  in is_inline()
    612  ptr = skb_frag_address_safe(&shinfo->frags[0]);  in is_inline()
    618  if (shinfo->nr_frags)  in is_inline()
    635  const struct skb_shared_info *shinfo,  in get_real_size() [argument]
    645  if (shinfo->gso_size) {  in get_real_size()
    659  real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +  in get_real_size()
    675  shinfo, pfrag);  in get_real_size()
    681  (shinfo->nr_frags + 1) * DS_SIZE;  in get_real_size()
    689  const struct skb_shared_info *shinfo,  in build_inline_wqe() [argument]
    [all …]
|
| /linux/drivers/net/xen-netback/ |
| netback.c |
    387  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_get_requests() [local]
    388  skb_frag_t *frags = shinfo->frags;  in xenvif_get_requests()
    396  nr_slots = shinfo->nr_frags + frag_overflow + 1;  in xenvif_get_requests()
    462  for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;  in xenvif_get_requests()
    474  frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);  in xenvif_get_requests()
    475  ++shinfo->nr_frags;  in xenvif_get_requests()
    486  shinfo = skb_shinfo(nskb);  in xenvif_get_requests()
    487  frags = shinfo->frags;  in xenvif_get_requests()
    489  for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {  in xenvif_get_requests()
    500  frag_set_pending_idx(&frags[shinfo->nr_frags],  in xenvif_get_requests()
    [all …]
|
| /linux/drivers/net/ethernet/fungible/funeth/ |
| funeth_tx.c |
    154  const struct skb_shared_info *shinfo;  in write_pkt_desc() [local]
    164  shinfo = skb_shinfo(skb);  in write_pkt_desc()
    165  if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,  in write_pkt_desc()
    179  if (likely(shinfo->gso_size)) {  in write_pkt_desc()
    186  if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |  in write_pkt_desc()
    190  if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)  in write_pkt_desc()
    213  shinfo->gso_size,  in write_pkt_desc()
    219  } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {  in write_pkt_desc()
    232  shinfo->gso_size,  in write_pkt_desc()
    242  if (shinfo->gso_type & SKB_GSO_TCPV6)  in write_pkt_desc()
    [all …]
|
| /linux/tools/testing/selftests/bpf/progs/ |
| net_timestamping.c |
    121  struct skb_shared_info *shinfo;  in bpf_test_delay() [local]
    134  shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info);  in bpf_test_delay()
    146  key.tskey = shinfo->tskey;  in bpf_test_delay()
    153  key.tskey = shinfo->tskey;  in bpf_test_delay()
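A BPF program cannot call skb_shinfo(), so line 134 recomputes the same address by hand: the shared info sits at skb->head + skb->end (end being an offset from head in the usual 64-bit configuration), and bpf_core_cast() gives the verifier a typed view of that pointer. A sketch of the access, assuming the usual vmlinux.h plus bpf/bpf_core_read.h setup:

```c
/* Sketch: a BPF-side equivalent of skb_shinfo(), as in line 134 above. */
static struct skb_shared_info *bpf_skb_shinfo(struct sk_buff *skb)
{
        /* end is an offset from head where the shared info begins */
        return bpf_core_cast(skb->head + skb->end, struct skb_shared_info);
}
```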
|
| /linux/include/linux/ |
| skbuff_ref.h |
    68  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_frag_unref() [local]
    71  __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);  in skb_frag_unref()
|
| virtio_net.h |
    160  struct skb_shared_info *shinfo = skb_shinfo(skb);  in __virtio_net_hdr_to_skb() [local]
    191  shinfo->gso_size = gso_size;  in __virtio_net_hdr_to_skb()
    192  shinfo->gso_type = gso_type;  in __virtio_net_hdr_to_skb()
    195  shinfo->gso_type |= SKB_GSO_DODGY;  in __virtio_net_hdr_to_skb()
    196  shinfo->gso_segs = 0;  in __virtio_net_hdr_to_skb()
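__virtio_net_hdr_to_skb() takes gso_size and gso_type from a header supplied by a guest or user space, so it cannot trust them: flagging the skb SKB_GSO_DODGY and zeroing gso_segs forces the GSO layer to re-validate the metadata and recompute the segment count before segmenting. A sketch of that marking (hypothetical helper):

```c
/* Sketch: accept untrusted GSO metadata but flag it for re-validation. */
static void set_untrusted_gso(struct sk_buff *skb, u16 gso_size,
                              unsigned int gso_type)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        shinfo->gso_size = gso_size;
        shinfo->gso_type = gso_type;
        shinfo->gso_type |= SKB_GSO_DODGY;  /* header came from a guest */
        shinfo->gso_segs = 0;               /* recomputed at GSO time   */
}
```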
|
| skbuff.h |
    1299  struct skb_shared_info *shinfo)  in skb_data_unref() [argument]
    1308  if (atomic_read(&shinfo->dataref) == bias)  in skb_data_unref()
    1310  else if (atomic_sub_return(bias, &shinfo->dataref))  in skb_data_unref()
    2566  static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,  in __skb_fill_netmem_desc_noacc() [argument]
    2570  skb_frag_t *frag = &shinfo->frags[i];  in __skb_fill_netmem_desc_noacc()
    2575  static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,  in __skb_fill_page_desc_noacc() [argument]
    2579  __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,  in __skb_fill_page_desc_noacc()
    2679  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_fill_page_desc_noacc() [local]
    2681  __skb_fill_page_desc_noacc(shinfo, i, page, off, size);  in skb_fill_page_desc_noacc()
    2682  shinfo->nr_frags = i + 1;  in skb_fill_page_desc_noacc()
    [all …]
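Lines 2681-2682 show the ordering convention for adding a fragment: write the descriptor into frags[i] first, then publish it by raising nr_frags (the _noacc variants also skip skb data-length accounting). A sketch of that fill-then-count pattern (hypothetical wrapper):

```c
/* Sketch: append one page fragment, publishing it only once filled. */
static void fill_frag(struct sk_buff *skb, int i, struct page *page,
                      int off, int size)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
        shinfo->nr_frags = i + 1;   /* descriptor is valid, count it */
}
```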
|
| /linux/arch/x86/kvm/ |
| xen.c |
      76  struct shared_info *shinfo = gpc->khva;  in kvm_xen_shared_info_init() [local]
      78  wc_sec_hi = &shinfo->wc_sec_hi;  in kvm_xen_shared_info_init()
      79  wc = &shinfo->wc;  in kvm_xen_shared_info_init()
      83  struct compat_shared_info *shinfo = gpc->khva;  in kvm_xen_shared_info_init() [local]
      85  wc_sec_hi = &shinfo->arch.wc_sec_hi;  in kvm_xen_shared_info_init()
      86  wc = &shinfo->wc;  in kvm_xen_shared_info_init()
    1450  struct shared_info *shinfo = gpc->khva;  in wait_pending_event() [local]
    1451  pending_bits = (unsigned long *)&shinfo->evtchn_pending;  in wait_pending_event()
    1453  struct compat_shared_info *shinfo = gpc->khva;  in wait_pending_event() [local]
    1454  pending_bits = (unsigned long *)&shinfo->evtchn_pending;  in wait_pending_event()
    [all …]
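kvm_xen_shared_info_init() copes with two guest ABIs: the 64-bit shared_info keeps wc_sec_hi as a top-level field, while the 32-bit compat layout nests it under arch. The code picks per-layout pointers once, then shares the wallclock update. A sketch of the selection; the long-mode gating and the hypothetical helper are my assumptions about the surrounding code:

```c
/* Sketch: pick wallclock field pointers for the guest's ABI layout. */
static void pick_wc_fields(void *khva, bool long_mode,
                           u32 **wc_sec_hi, struct pvclock_wall_clock **wc)
{
        if (IS_ENABLED(CONFIG_64BIT) && long_mode) {
                struct shared_info *shinfo = khva;

                *wc_sec_hi = &shinfo->wc_sec_hi;
                *wc = &shinfo->wc;
        } else {
                struct compat_shared_info *shinfo = khva;

                *wc_sec_hi = &shinfo->arch.wc_sec_hi; /* compat nests it */
                *wc = &shinfo->wc;
        }
}
```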
|
| /linux/net/ipv4/ |
| tcp_output.c |
    1721  struct skb_shared_info *shinfo = skb_shinfo(skb);  in tcp_fragment_tstamp() [local]
    1724  !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {  in tcp_fragment_tstamp()
    1726  u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;  in tcp_fragment_tstamp()
    1728  shinfo->tx_flags &= ~tsflags;  in tcp_fragment_tstamp()
    1730  swap(shinfo->tskey, shinfo2->tskey);  in tcp_fragment_tstamp()
    1856  struct skb_shared_info *shinfo;  in __pskb_trim_head() [local]
    1862  shinfo = skb_shinfo(skb);  in __pskb_trim_head()
    1863  for (i = 0; i < shinfo->nr_frags; i++) {  in __pskb_trim_head()
    1864  int size = skb_frag_size(&shinfo->frags[i]);  in __pskb_trim_head()
    1870  shinfo->frags[k] = shinfo->frags[i];  in __pskb_trim_head()
    [all …]
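The __pskb_trim_head() hits are the classic frag-compaction loop: fragments wholly inside the trimmed region are dropped, survivors are packed down to the front of frags[], and the first survivor may be trimmed partially. A sketch reconstructing that loop around the lines shown (hypothetical helper; 'eat' is how many payload bytes still need removing):

```c
/* Sketch: trim 'eat' bytes from the front of an skb's page frags. */
static void trim_frags(struct sk_buff *skb, int eat)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i, k = 0;

        for (i = 0; i < shinfo->nr_frags; i++) {
                int size = skb_frag_size(&shinfo->frags[i]);

                if (size <= eat) {
                        skb_frag_unref(skb, i);     /* frag fully eaten */
                        eat -= size;
                } else {
                        shinfo->frags[k] = shinfo->frags[i];
                        if (eat) {                  /* partially eaten */
                                skb_frag_off_add(&shinfo->frags[k], eat);
                                skb_frag_size_sub(&shinfo->frags[k], eat);
                                eat = 0;
                        }
                        k++;
                }
        }
        shinfo->nr_frags = k;                       /* survivors only */
}
```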
|
| /linux/drivers/net/ethernet/marvell/octeon_ep_vf/ |
| octep_vf_main.c |
    601  struct skb_shared_info *shinfo;  in octep_vf_start_xmit() [local]
    620  shinfo = skb_shinfo(skb);  in octep_vf_start_xmit()
    621  nr_frags = shinfo->nr_frags;  in octep_vf_start_xmit()
    664  frag = &shinfo->frags[0];  in octep_vf_start_xmit()
|
| /linux/drivers/net/ethernet/google/gve/ |
| gve_rx_dqo.c |
    947  struct skb_shared_info *shinfo = skb_shinfo(skb);  in gve_rx_complete_rsc() [local]
    955  shinfo->gso_type = SKB_GSO_TCPV4;  in gve_rx_complete_rsc()
    958  shinfo->gso_type = SKB_GSO_TCPV6;  in gve_rx_complete_rsc()
    964  shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);  in gve_rx_complete_rsc()
|
| /linux/drivers/net/ethernet/marvell/octeon_ep/ |
| octep_main.c |
    849  struct skb_shared_info *shinfo;  in octep_start_xmit() [local]
    868  shinfo = skb_shinfo(skb);  in octep_start_xmit()
    869  nr_frags = shinfo->nr_frags;  in octep_start_xmit()
    911  frag = &shinfo->frags[0];  in octep_start_xmit()
|
| /linux/net/sched/ |
| sch_cake.c |
    1398  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in cake_overhead() [local]
    1416  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |  in cake_overhead()
    1433  len = shinfo->gso_size + hdr_len;  in cake_overhead()
    1434  last_len = skb->len - shinfo->gso_size * (segs - 1);  in cake_overhead()
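For a GSO superpacket, cake_overhead() charges each segment individually: every segment but the last carries gso_size payload bytes plus headers, and the last carries whatever remains. A sketch of the sizing shown at lines 1433-1434 (hypothetical helper and out-parameters):

```c
/* Sketch: per-segment lengths cake uses to cost a GSO superpacket. */
static void cake_seg_lens(const struct sk_buff *skb, unsigned int hdr_len,
                          unsigned int segs, u32 *len, u32 *last_len)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        *len = shinfo->gso_size + hdr_len;                    /* full segs */
        *last_len = skb->len - shinfo->gso_size * (segs - 1); /* remainder */
}
```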
|
| /linux/drivers/net/ethernet/intel/idpf/ |
| idpf_lib.c |
    2323  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in idpf_chk_tso_segment() [local]
    2328  nr_frags = shinfo->nr_frags;  in idpf_chk_tso_segment()
    2336  frag = &shinfo->frags[0];  in idpf_chk_tso_segment()
    2344  sum = 1 - shinfo->gso_size;  in idpf_chk_tso_segment()
    2356  for (stale = &shinfo->frags[0];; stale++) {  in idpf_chk_tso_segment()
|
| /linux/io_uring/ |
| net.c |
    1398  struct skb_shared_info *shinfo = skb_shinfo(skb);  in io_sg_from_iter() [local]
    1399  int frag = shinfo->nr_frags;  in io_sg_from_iter()
    1406  shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;  in io_sg_from_iter()
    1419  __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,  in io_sg_from_iter()
    1426  shinfo->nr_frags = frag;  in io_sg_from_iter()
|
| /linux/drivers/net/ |
| virtio_net.c |
    1651  struct skb_shared_info *shinfo;  in __virtnet_xdp_xmit_one() [local]
    1659  shinfo = xdp_get_shared_info_from_frame(xdpf);  in __virtnet_xdp_xmit_one()
    1660  nr_frags = shinfo->nr_frags;  in __virtnet_xdp_xmit_one()
    1680  skb_frag_t *frag = &shinfo->frags[i];  in __virtnet_xdp_xmit_one()
    1793  struct skb_shared_info *shinfo;  in put_xdp_frags() [local]
    1798  shinfo = xdp_get_shared_info_from_buff(xdp);  in put_xdp_frags()
    1799  for (i = 0; i < shinfo->nr_frags; i++) {  in put_xdp_frags()
    1800  xdp_page = skb_frag_page(&shinfo->frags[i]);  in put_xdp_frags()
    2227  struct skb_shared_info *shinfo;  in virtnet_build_xdp_buff_mrg() [local]
    2250  shinfo = xdp_get_shared_info_from_buff(xdp);  in virtnet_build_xdp_buff_mrg()
    [all …]
|
| /linux/drivers/net/ethernet/realtek/rtase/ |
| rtase_main.c |
    1311  struct skb_shared_info *shinfo = skb_shinfo(skb);  in rtase_start_xmit() [local]
    1316  u32 mss = shinfo->gso_size;  in rtase_start_xmit()
    1344  if (shinfo->gso_type & SKB_GSO_TCPV4) {  in rtase_start_xmit()
    1346  } else if (shinfo->gso_type & SKB_GSO_TCPV6) {  in rtase_start_xmit()
|
| /linux/drivers/net/ethernet/hisilicon/hns3/ |
| hns3_enet.h |
    753  void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
|