
Searched refs:skb_shinfo (Results 1 – 25 of 302) sorted by relevance
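
Every hit below goes through the same accessor: skb_shinfo() is a macro in include/linux/skbuff.h that casts the end of the skb's linear data buffer to a struct skb_shared_info, where the GSO parameters, paged fragments, and timestamp flags these matches touch all live. A minimal sketch of the flag-test idiom the offload files share (the helper name is hypothetical; the accessors and the flag are real kernel symbols):

    #include <linux/skbuff.h>

    /* skb_shinfo(skb) expands to
     * ((struct skb_shared_info *)(skb_end_pointer(skb))),
     * so each use below is a pointer cast, not a function call. */
    static bool skb_needs_tunnel_csum(const struct sk_buff *skb)
    {
        /* skb_is_gso() tests gso_size != 0; the gso_type bitmask
         * test mirrors __skb_udp_tunnel_segment() below. */
        return skb_is_gso(skb) &&
               !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
    }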


/linux/net/ipv4/
udp_offload.c
42 if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) in __skb_udp_tunnel_segment()
58 need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); in __skb_udp_tunnel_segment()
61 remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); in __skb_udp_tunnel_segment()
94 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in __skb_udp_tunnel_segment()
126 uh->len = htons(skb_shinfo(skb)->gso_size + in __skb_udp_tunnel_segment()
254 unsigned int mss = skb_shinfo(skb)->gso_size; in __udp_gso_segment_list()
277 mss = skb_shinfo(gso_skb)->gso_size; in __udp_gso_segment()
283 !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST))) in __udp_gso_segment()
294 skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh), in __udp_gso_segment()
299 if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) { in __udp_gso_segment()
[all …]
tcp_offload.c
19 u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP; in tcp_gso_tstamp()
20 u32 ts_seq = skb_shinfo(gso_skb)->tskey; in tcp_gso_tstamp()
24 skb_shinfo(skb)->tx_flags |= flags; in tcp_gso_tstamp()
25 skb_shinfo(skb)->tskey = ts_seq; in tcp_gso_tstamp()
101 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)) in tcp4_gso_segment()
107 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { in tcp4_gso_segment()
110 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) in tcp4_gso_segment()
161 mss = skb_shinfo(skb)->gso_size; in tcp_gso_segment()
168 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in tcp_gso_segment()
191 mss *= skb_shinfo(segs)->gso_segs; in tcp_gso_segment()
[all …]
gre_offload.c
45 need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); in gre_gso_segment()
65 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in gre_gso_segment()
103 skb_shinfo(skb)->gso_size; in gre_gso_segment()
245 skb_shinfo(skb)->gso_type = SKB_GSO_GRE; in gre_gro_complete()
/linux/net/core/
skbuff.c
316 shinfo = skb_shinfo(skb); in __finalize_skb_around()
804 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
823 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
979 shinfo = skb_shinfo(skb); in skb_pp_frag_ref()
1014 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1214 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1278 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1279 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1515 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1538 skb_shinfo(n)->frag_list = first; in alloc_skb_for_msg()
[all …]
tso.c
53 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { in tso_build_data()
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_build_data()
79 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { in tso_start()
80 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_start()
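
The tso.c hits above, like skbuff.c before them and tls_strp.c and several drivers further down, walk the paged fragments recorded in the shared info. A sketch of that recurring loop (the function is hypothetical; nr_frags, frags[] and skb_frag_size() are real):

    #include <linux/skbuff.h>

    /* Sum the bytes held in an skb's paged fragments. */
    static unsigned int skb_frags_total(const struct sk_buff *skb)
    {
        unsigned int i, total = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
            total += skb_frag_size(&skb_shinfo(skb)->frags[i]);

        return total;
    }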
gro.c
93 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
125 pinfo = skb_shinfo(lp); in skb_gro_receive()
205 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
232 skb_shinfo(p)->frag_list = skb; in skb_gro_receive_list()
264 skb_shinfo(skb)->gso_size = 0; in napi_gro_complete()
400 pinfo = skb_shinfo(skb); in skb_gro_reset_offset()
415 struct skb_shared_info *pinfo = skb_shinfo(skb); in gro_pull_from_frag0()
495 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs; in dev_gro_receive()
498 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) in dev_gro_receive()
549 skb_shinfo(skb)->gso_size = skb_gro_len(skb); in dev_gro_receive()
[all …]
/linux/drivers/net/xen-netback/
netback.c
370 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
386 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_get_requests()
485 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
506 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
560 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_tx_check_gop()
677 shinfo = skb_shinfo(shinfo->frag_list); in xenvif_tx_check_gop()
689 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_fill_frags()
704 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
771 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
774 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
[all …]
/linux/net/ipv6/
ip6_offload.c
142 skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6)) in ipv6_gso_segment()
144 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); in ipv6_gso_segment()
147 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); in ipv6_gso_segment()
160 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in ipv6_gso_segment()
165 payload_len = skb_shinfo(skb)->gso_size + in ipv6_gso_segment()
393 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; in sit_gro_complete()
400 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; in ip6ip6_gro_complete()
407 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; in ip4ip6_gro_complete()
415 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4)) in sit_gso_segment()
424 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6)) in ip4ip6_gso_segment()
[all …]
udp_offload.c
33 if (skb->encapsulation && skb_shinfo(skb)->gso_type & in udp6_ufo_fragment()
40 if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) in udp6_ufo_fragment()
46 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in udp6_ufo_fragment()
49 mss = skb_shinfo(skb)->gso_size; in udp6_ufo_fragment()
175 skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4); in udp6_gro_complete()
176 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in udp6_gro_complete()
tcpv6_offload.c
80 skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6; in tcp6_gro_complete()
81 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in tcp6_gro_complete()
90 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; in tcp6_gro_complete()
156 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) in tcp6_gso_segment()
162 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { in tcp6_gso_segment()
165 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) in tcp6_gso_segment()
/linux/drivers/net/ethernet/sfc/siena/
tx.h
28 if (skb_shinfo(skb)->gso_segs > 1 && in efx_tx_csum_type_skb()
29 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in efx_tx_csum_type_skb()
30 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in efx_tx_csum_type_skb()
/linux/net/openvswitch/
openvswitch_trace.h
52 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
53 __entry->gso_size = skb_shinfo(skb)->gso_size;
54 __entry->gso_type = skb_shinfo(skb)->gso_type;
122 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
123 __entry->gso_size = skb_shinfo(skb)->gso_size;
124 __entry->gso_type = skb_shinfo(skb)->gso_type;
/linux/include/trace/events/
net.h
56 __entry->tx_flags = skb_shinfo(skb)->tx_flags;
57 __entry->gso_size = skb_shinfo(skb)->gso_size;
58 __entry->gso_segs = skb_shinfo(skb)->gso_segs;
59 __entry->gso_type = skb_shinfo(skb)->gso_type;
214 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
215 __entry->gso_size = skb_shinfo(skb)->gso_size;
216 __entry->gso_type = skb_shinfo(skb)->gso_type;
/linux/drivers/net/ethernet/sfc/
tx_tso.c
291 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; in tso_start_new_packet()
295 st->packet_space = skb_shinfo(skb)->gso_size; in tso_start_new_packet()
341 st->seqnum += skb_shinfo(skb)->gso_size; in tso_start_new_packet()
386 EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1); in efx_enqueue_skb_tso()
389 skb_shinfo(skb)->frags + frag_i); in efx_enqueue_skb_tso()
408 if (++frag_i >= skb_shinfo(skb)->nr_frags) in efx_enqueue_skb_tso()
412 skb_shinfo(skb)->frags + frag_i); in efx_enqueue_skb_tso()
ef100_tx.c
67 mss = skb_shinfo(skb)->gso_size; in ef100_tx_can_tso()
77 if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { in ef100_tx_can_tso()
190 bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ef100_make_tso_desc()
195 u32 mss = skb_shinfo(skb)->gso_size; in ef100_make_tso_desc()
203 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID) in ef100_make_tso_desc()
216 if (skb_shinfo(skb)->gso_type & in ef100_make_tso_desc()
224 outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM; in ef100_make_tso_desc()
392 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __ef100_enqueue_skb()
/linux/net/tls/
tls_strp.c
32 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_anchor_free()
53 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in tls_strp_skb_copy()
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tls_strp_skb_copy()
147 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_msg_hold()
194 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_flush_anchor_copy()
218 frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; in tls_strp_copyin_frag()
299 shinfo = skb_shinfo(skb); in tls_strp_copyin_skb()
398 shinfo = skb_shinfo(strp->anchor); in tls_strp_read_copy()
433 first = skb_shinfo(strp->anchor)->frag_list; in tls_strp_check_queue_ok()
469 skb_shinfo(strp->anchor)->frag_list = first; in tls_strp_load_anchor_with_queue()
/linux/include/net/
tso.h
23 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; in tso_count_descs()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c
156 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in mlx5e_tx_get_gso_ihs()
164 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in mlx5e_tx_get_gso_ihs()
204 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mlx5e_txwqe_build_dsegs()
205 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mlx5e_txwqe_build_dsegs()
278 .mss = cpu_to_be16(skb_shinfo(skb)->gso_size), in mlx5e_sq_xmit_prepare()
280 .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs, in mlx5e_sq_xmit_prepare()
285 stats->packets += skb_shinfo(skb)->gso_segs; in mlx5e_sq_xmit_prepare()
318 ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids; in mlx5e_sq_calc_wqe_attr()
342 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in mlx5e_tx_skb_update_hwts_flags()
343 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in mlx5e_tx_skb_update_hwts_flags()
[all …]
/linux/include/linux/
skbuff_ref.h
32 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); in skb_frag_ref()
68 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_frag_unref()
/linux/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_ptp.h
57 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; in cxgb4_xmit_with_hwtstamp()
62 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_xmit_hwtstamp_pending()
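
The cxgb4_ptp.h pair above is the hardware-timestamp handshake that en_tx.c above and nicvf_queues.c and ionic_txrx.c below repeat: the stack sets SKBTX_HW_TSTAMP in tx_flags to request a TX timestamp, and the driver marks SKBTX_IN_PROGRESS once the frame is handed to hardware. A condensed sketch (driver xmit context assumed; both flags are real):

    /* In a driver xmit path: honor a hardware TX timestamp request. */
    if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;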
/linux/drivers/net/ethernet/sun/
sunvnet_common.c
1084 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in vnet_skb_map()
1085 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in vnet_skb_map()
1124 docopy = skb_shinfo(skb)->nr_frags >= ncookies; in vnet_skb_shape()
1125 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in vnet_skb_shape()
1126 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in vnet_skb_shape()
1210 skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; in vnet_skb_shape()
1211 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; in vnet_skb_shape()
1250 gso_size = skb_shinfo(skb)->gso_size; in vnet_handle_offloads()
1251 gso_type = skb_shinfo(skb)->gso_type; in vnet_handle_offloads()
1252 gso_segs = skb_shinfo(skb)->gso_segs; in vnet_handle_offloads()
[all …]
/linux/net/sctp/
inqueue.c
133 chunk->skb = skb_shinfo(chunk->skb)->frag_list; in sctp_inq_pop()
168 if (skb_shinfo(chunk->skb)->frag_list) in sctp_inq_pop()
173 chunk->skb = skb_shinfo(chunk->skb)->frag_list; in sctp_inq_pop()
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { in qede_free_tx_pkt()
192 unsigned short gso_type = skb_shinfo(skb)->gso_type; in qede_xmit_type()
284 return (skb_shinfo(skb)->nr_frags > allowed_frags); in qede_pkt_req_lin()
656 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in qede_set_gro_params()
658 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in qede_set_gro_params()
660 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
928 skb_shinfo(skb)->gso_type = 0; in qede_gro_receive()
929 skb_shinfo(skb)->gso_size = 0; in qede_gro_receive()
934 if (skb_shinfo(skb)->gso_size) { in qede_gro_receive()
1208 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data, in qede_rx_build_jumbo()
[all …]
/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
1263 struct skb_shared_info *sh = skb_shinfo(skb); in nicvf_tso_count_subdescs()
1311 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) { in nicvf_sq_subdesc_required()
1317 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) in nicvf_sq_subdesc_required()
1320 if (skb_shinfo(skb)->nr_frags) in nicvf_sq_subdesc_required()
1321 subdesc_cnt += skb_shinfo(skb)->nr_frags; in nicvf_sq_subdesc_required()
1346 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { in nicvf_sq_add_hdr_subdesc()
1383 if (nic->hw_tso && skb_shinfo(skb)->gso_size) { in nicvf_sq_add_hdr_subdesc()
1386 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; in nicvf_sq_add_hdr_subdesc()
1393 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in nicvf_sq_add_hdr_subdesc()
1399 if (skb_shinfo(skb)->gso_size) in nicvf_sq_add_hdr_subdesc()
[all …]
/linux/drivers/net/ethernet/pensando/ionic/
ionic_txrx.c
163 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in ionic_rx_add_skb_frag()
1125 frag = skb_shinfo(skb)->frags; in ionic_tx_map_skb()
1126 nfrags = skb_shinfo(skb)->nr_frags; in ionic_tx_map_skb()
1215 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ionic_tx_clean()
1453 mss = skb_shinfo(skb)->gso_size; in ionic_tx_tso()
1454 outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in ionic_tx_tso()
1566 flags, skb_shinfo(skb)->nr_frags, in ionic_tx_calc_csum()
1605 flags, skb_shinfo(skb)->nr_frags, in ionic_tx_calc_no_csum()
1632 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { in ionic_tx_skb_frags()
1637 stats->frags += skb_shinfo(skb)->nr_frags; in ionic_tx_skb_frags()
[all …]
