
Searched refs: segs (Results 1 – 25 of 96), sorted by relevance


/linux/net/core/
net_test.c
52 const unsigned int *segs; member
61 .segs = (const unsigned int[]) { GSO_TEST_SIZE },
68 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
77 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
85 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
94 .segs = (const unsigned int[]) { 2 * GSO_TEST_SIZE, 3 },
104 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE },
112 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
122 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE, 3 },
136 .segs = (const unsigned int[]) { 100, 200, 300, 400 },
[all …]
gso.c
16 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_eth_gso_segment() local
22 segs = ptype->callbacks.gso_segment(skb, features); in skb_eth_gso_segment()
28 return segs; in skb_eth_gso_segment()
40 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_mac_gso_segment() local
53 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
61 return segs; in skb_mac_gso_segment()
91 struct sk_buff *segs; in __skb_gso_segment() local
124 segs = skb_mac_gso_segment(skb, features); in __skb_gso_segment()
126 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) in __skb_gso_segment()
129 return segs; in __skb_gso_segment()
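
The gso.c hits above show the return convention shared by almost every segmentation routine in these results: segs starts out as an error pointer (here ERR_PTR(-EPROTONOSUPPORT), "no handler registered") and only becomes a real list if a protocol callback succeeds, so a single struct sk_buff * can carry either outcome. Below is a minimal userspace sketch of that encoding, with toy stand-ins for the kernel's <linux/err.h> helpers, not the real implementation. Some callers (tcp_gso_segment() and xfrm_output_gso() further down) add a third outcome: a NULL return means the packet needs no segmentation at all.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Toy re-creations of the kernel's ERR_PTR helpers, for illustration. */
static inline void *ERR_PTR(intptr_t error) { return (void *)error; }
static inline intptr_t PTR_ERR(const void *ptr) { return (intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for a protocol's gso_segment() callback. */
static void *segment_cb(int proto_supported)
{
        /* Default mirrors gso.c: "no handler" until proven otherwise. */
        void *segs = ERR_PTR(-EPROTONOSUPPORT);
        static int dummy_list;

        if (proto_supported)
                segs = &dummy_list;     /* would be the new segment list */
        return segs;
}

int main(void)
{
        void *segs = segment_cb(0);

        if (IS_ERR(segs))
                printf("segmentation failed: %ld\n", (long)PTR_ERR(segs));
        return 0;
}
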
/linux/drivers/net/ethernet/intel/ice/
ice_flow.c
650 static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) in ice_flow_val_hdrs() argument
656 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && in ice_flow_val_hdrs()
657 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) in ice_flow_val_hdrs()
661 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && in ice_flow_val_hdrs()
662 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) in ice_flow_val_hdrs()
690 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? in ice_flow_calc_seg_sz()
694 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) in ice_flow_calc_seg_sz()
696 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) in ice_flow_calc_seg_sz()
698 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) in ice_flow_calc_seg_sz()
700 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) in ice_flow_calc_seg_sz()
[all …]
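
The ice_flow_val_hdrs() hits encode the rule "each flow segment may select at most one L3 and at most one L4 header type" as a power-of-two test on the masked header bitmap. A small sketch of the L3 half of that check, with made-up flag values standing in for the real masks in ice_flow.h:

#include <stdbool.h>
#include <stdio.h>

#define SEG_HDR_IPV4     0x1u   /* illustrative, not the ICE values */
#define SEG_HDR_IPV6     0x2u
#define SEG_HDRS_L3_MASK (SEG_HDR_IPV4 | SEG_HDR_IPV6)

static bool is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* A segment may select no L3 header, or exactly one. */
static bool l3_hdrs_valid(unsigned int hdrs)
{
        unsigned int l3 = hdrs & SEG_HDRS_L3_MASK;

        return !l3 || is_power_of_2(l3);
}

int main(void)
{
        printf("%d\n", l3_hdrs_valid(SEG_HDR_IPV4));                 /* 1 */
        printf("%d\n", l3_hdrs_valid(SEG_HDR_IPV4 | SEG_HDR_IPV6)); /* 0 */
        return 0;
}
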
/linux/net/ipv4/
udp_offload.c
24 struct sk_buff *segs = ERR_PTR(-EINVAL); in __skb_udp_tunnel_segment() local
87 segs = gso_inner_segment(skb, features); in __skb_udp_tunnel_segment()
88 if (IS_ERR_OR_NULL(segs)) { in __skb_udp_tunnel_segment()
94 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in __skb_udp_tunnel_segment()
98 skb = segs; in __skb_udp_tunnel_segment()
150 return segs; in __skb_udp_tunnel_segment()
160 struct sk_buff *segs = ERR_PTR(-EINVAL); in skb_udp_tunnel_segment() local
182 segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, in skb_udp_tunnel_segment()
188 return segs; in skb_udp_tunnel_segment()
219 static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) in __udpv4_gso_segment_list_csum() argument
[all …]
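
The gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL) hit uses the usual kernel idiom for normalizing a flag test: the mask leaves some arbitrary nonzero bit, and the double negation collapses it to 0 or 1 before it is stored. A trivial sketch with an invented flag value:

#include <stdio.h>

#define GSO_PARTIAL 0x80u       /* illustrative, not the real SKB_GSO_PARTIAL */

int main(void)
{
        unsigned int gso_type = 0x85u;
        int gso_partial = !!(gso_type & GSO_PARTIAL);

        printf("masked: 0x%x, normalized: %d\n",
               gso_type & GSO_PARTIAL, gso_partial);    /* 0x80, 1 */
        return 0;
}
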
tcp_offload.c
52 static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs) in __tcpv4_gso_segment_list_csum() argument
60 seg = segs; in __tcpv4_gso_segment_list_csum()
68 return segs; in __tcpv4_gso_segment_list_csum()
82 return segs; in __tcpv4_gso_segment_list_csum()
132 struct sk_buff *segs = ERR_PTR(-EINVAL); in tcp_gso_segment() local
167 segs = NULL; in tcp_gso_segment()
176 segs = skb_segment(skb, features); in tcp_gso_segment()
177 if (IS_ERR(segs)) in tcp_gso_segment()
181 segs->ooo_okay = ooo_okay; in tcp_gso_segment()
187 if (skb_is_gso(segs)) in tcp_gso_segment()
[all …]
gre_offload.c
21 struct sk_buff *segs = ERR_PTR(-EINVAL); in gre_gso_segment() local
58 segs = skb_mac_gso_segment(skb, features); in gre_gso_segment()
59 if (IS_ERR_OR_NULL(segs)) { in gre_gso_segment()
65 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in gre_gso_segment()
69 skb = segs; in gre_gso_segment()
119 return segs; in gre_gso_segment()
/linux/net/sctp/
offload.c
42 struct sk_buff *segs = ERR_PTR(-EINVAL); in sctp_gso_segment() local
68 segs = NULL; in sctp_gso_segment()
72 segs = skb_segment(skb, (features | NETIF_F_HW_CSUM) & ~NETIF_F_SG); in sctp_gso_segment()
73 if (IS_ERR(segs)) in sctp_gso_segment()
78 for (skb = segs; skb; skb = skb->next) { in sctp_gso_segment()
87 return segs; in sctp_gso_segment()
/linux/net/mpls/
mpls_gso.c
23 struct sk_buff *segs = ERR_PTR(-EINVAL); in mpls_gso_segment() local
51 segs = skb_mac_gso_segment(skb, mpls_features); in mpls_gso_segment()
52 if (IS_ERR_OR_NULL(segs)) { in mpls_gso_segment()
57 skb = segs; in mpls_gso_segment()
73 return segs; in mpls_gso_segment()
/linux/net/nsh/
nsh.c
81 struct sk_buff *segs = ERR_PTR(-EINVAL); in nsh_gso_segment() local
110 segs = skb_mac_gso_segment(skb, features); in nsh_gso_segment()
111 if (IS_ERR_OR_NULL(segs)) { in nsh_gso_segment()
117 for (skb = segs; skb; skb = skb->next) { in nsh_gso_segment()
126 return segs; in nsh_gso_segment()
/linux/net/ipv6/
ip6_offload.c
110 struct sk_buff *segs = ERR_PTR(-EINVAL); in ipv6_gso_segment() local
137 segs = ERR_PTR(-EPROTONOSUPPORT); in ipv6_gso_segment()
152 segs = ops->callbacks.gso_segment(skb, features); in ipv6_gso_segment()
153 if (!segs) in ipv6_gso_segment()
157 if (IS_ERR_OR_NULL(segs)) in ipv6_gso_segment()
160 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in ipv6_gso_segment()
162 for (skb = segs; skb; skb = skb->next) { in ipv6_gso_segment()
177 kfree_skb_list(segs); in ipv6_gso_segment()
192 return segs; in ipv6_gso_segment()
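
The ipv6_gso_segment() hits also show the cleanup obligation that comes with these lists: once a callback has built a chain of segments, a later failure must release the whole chain, hence the kfree_skb_list(segs) on the error path. A userspace sketch of the same walk-and-free pattern on a toy singly linked list:

#include <stdlib.h>

struct seg {
        struct seg *next;
};

/* Userspace analogue of kfree_skb_list(): release an entire chain. */
static void free_seg_list(struct seg *segs)
{
        while (segs) {
                struct seg *next = segs->next;

                free(segs);
                segs = next;
        }
}

int main(void)
{
        struct seg *head = calloc(1, sizeof(*head));

        if (head)
                head->next = calloc(1, sizeof(*head->next));
        free_seg_list(head);    /* frees both nodes, tolerates NULL */
        return 0;
}
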
udp_offload.c
22 struct sk_buff *segs = ERR_PTR(-EINVAL); in udp6_ufo_fragment() local
35 segs = skb_udp_tunnel_segment(skb, features, true); in udp6_ufo_fragment()
109 segs = skb_segment(skb, features); in udp6_ufo_fragment()
113 return segs; in udp6_ufo_fragment()
tcpv6_offload.c
109 static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs) in __tcpv6_gso_segment_list_csum() argument
117 seg = segs; in __tcpv6_gso_segment_list_csum()
126 return segs; in __tcpv6_gso_segment_list_csum()
138 return segs; in __tcpv6_gso_segment_list_csum()
/linux/drivers/infiniband/sw/rdmavt/
mr.c
379 mr->mr.map[m]->segs[n].vaddr = vaddr; in rvt_reg_user_mr()
380 mr->mr.map[m]->segs[n].length = PAGE_SIZE; in rvt_reg_user_mr()
569 mr->mr.map[m]->segs[n].vaddr = (void *)addr; in rvt_set_page()
570 mr->mr.map[m]->segs[n].length = ps; in rvt_set_page()
599 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; in rvt_map_mr_sg()
787 while (off >= mr->map[m]->segs[n].length) { in rvt_lkey_ok()
788 off -= mr->map[m]->segs[n].length; in rvt_lkey_ok()
797 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_lkey_ok()
798 isge->length = mr->map[m]->segs[n].length - off; in rvt_lkey_ok()
894 while (off >= mr->map[m]->segs[n].length) { in rvt_rkey_ok()
[all …]
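
The rvt_lkey_ok()/rvt_rkey_ok() hits walk a two-level table: each map entry holds a fixed-size segs[] array, and a byte offset is resolved by subtracting whole segment lengths until it lands inside one, presumably advancing n within a map and m across maps as the elided lines suggest. A sketch of that walk with simplified types (bounds checking omitted, on the assumption that the caller validates the total length first, as the kernel code does):

#include <stdio.h>
#include <stddef.h>

#define SEGSZ 2 /* segments per map entry; RVT_SEGSZ in the kernel */

struct seg { void *vaddr; size_t length; };
struct segmap { struct seg segs[SEGSZ]; };

static struct seg *find_seg(struct segmap **map, size_t off, size_t *rem)
{
        size_t m = 0, n = 0;

        while (off >= map[m]->segs[n].length) {
                off -= map[m]->segs[n].length;
                if (++n >= SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        *rem = off;
        return &map[m]->segs[n];
}

int main(void)
{
        static char buf[4][16];
        struct segmap m0 = { .segs = { { buf[0], 16 }, { buf[1], 16 } } };
        struct segmap m1 = { .segs = { { buf[2], 16 }, { buf[3], 16 } } };
        struct segmap *map[] = { &m0, &m1 };
        size_t rem;
        struct seg *s = find_seg(map, 40, &rem);

        printf("landed at %p + %zu\n", s->vaddr, rem);  /* buf[2] + 8 */
        return 0;
}
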
/linux/net/sched/
sch_tbf.c
209 struct sk_buff *segs, *nskb; in tbf_segment() local
214 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in tbf_segment()
216 if (IS_ERR_OR_NULL(segs)) in tbf_segment()
220 skb_list_walk_safe(segs, segs, nskb) { in tbf_segment()
221 skb_mark_not_on_list(segs); in tbf_segment()
222 seg_len = segs->len; in tbf_segment()
223 qdisc_skb_cb(segs)->pkt_len = seg_len; in tbf_segment()
224 ret = qdisc_enqueue(segs, q->qdisc, to_free); in tbf_segment()
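
tbf_segment() shows the standard consumption pattern for a freshly built segment list: skb_list_walk_safe() saves the next pointer before the loop body runs, so each segment can be unlinked (skb_mark_not_on_list()) and enqueued without breaking the walk. A userspace sketch of the same safe-iteration shape, with a toy list and a macro that mirrors the kernel one's structure:

#include <stdio.h>
#include <stddef.h>

struct pkt {
        struct pkt *next;
        int len;
};

/* Mirrors the shape of the kernel's skb_list_walk_safe(). */
#define list_walk_safe(first, cur, nxt)                           \
        for ((cur) = (first), (nxt) = (cur) ? (cur)->next : NULL; \
             (cur);                                               \
             (cur) = (nxt), (nxt) = (cur) ? (cur)->next : NULL)

int main(void)
{
        struct pkt c = { NULL, 300 }, b = { &c, 200 }, a = { &b, 100 };
        struct pkt *cur, *nxt;

        list_walk_safe(&a, cur, nxt) {
                cur->next = NULL;       /* skb_mark_not_on_list() analogue */
                printf("enqueue len %d\n", cur->len);
        }
        return 0;
}
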
sch_netem.c
429 struct sk_buff *segs; in netem_segment() local
432 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in netem_segment()
434 if (IS_ERR_OR_NULL(segs)) { in netem_segment()
439 return segs; in netem_segment()
455 struct sk_buff *segs = NULL; in netem_enqueue() local
504 segs = skb->next; in netem_enqueue()
527 skb->next = segs; in netem_enqueue()
620 if (segs) { in netem_enqueue()
627 while (segs) { in netem_enqueue()
628 skb2 = segs->next; in netem_enqueue()
[all …]
sch_cake.c
1342 u16 segs = 1; in cake_overhead() local
1371 segs = DIV_ROUND_UP(skb->len - hdr_len, in cake_overhead()
1374 segs = shinfo->gso_segs; in cake_overhead()
1377 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
1379 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1729 struct sk_buff *segs, *nskb; in cake_enqueue() local
1733 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in cake_enqueue()
1734 if (IS_ERR_OR_NULL(segs)) in cake_enqueue()
1737 skb_list_walk_safe(segs, segs, nskb) { in cake_enqueue()
1738 skb_mark_not_on_list(segs); in cake_enqueue()
[all …]
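
The cake_overhead() hits do their per-segment accounting without actually splitting the packet: when hardware will segment it later, the count is the payload divided by gso_size and rounded up, and the last segment carries the remainder. The same arithmetic worked with illustrative numbers (a two-MSS TCP super-packet):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int skb_len = 2962, hdr_len = 66, gso_size = 1448;

        unsigned int segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
        unsigned int last_len = skb_len - gso_size * (segs - 1);

        printf("segs=%u last_len=%u\n", segs, last_len);        /* 2, 1514 */
        return 0;
}
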
/linux/drivers/net/ethernet/broadcom/bnxt/
bnxt_coredump.c
96 info->segs = le16_to_cpu(*((__le16 *)(resp +
98 if (!info->segs) { in bnxt_hwrm_dbg_dma_data()
103 info->dest_buf_size = info->segs * in bnxt_hwrm_dbg_dma_data()
157 coredump->total_segs = info.segs; in bnxt_hwrm_dbg_coredump_list()
334 u32 *segs) in bnxt_get_ctx_coredump() argument
344 *segs = 0; in bnxt_get_ctx_coredump()
379 *segs += 1; in bnxt_get_ctx_coredump()
417 u32 drv_len, segs = 0; in __bnxt_get_coredump() local
419 drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs); in __bnxt_get_coredump()
423 coredump.total_segs += segs; in __bnxt_get_coredump()
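
In bnxt_hwrm_dbg_dma_data(), the firmware-reported segment count arrives little-endian, so it is converted with le16_to_cpu() before being used to size the destination buffer. A userspace sketch of an endian-independent 16-bit read (the response layout and offset here are invented, and the kernel helper converts a __le16 value rather than dereferencing a raw byte pointer):

#include <stdio.h>
#include <stdint.h>

/* Unaligned little-endian 16-bit load, correct on any host. */
static uint16_t read_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* Pretend firmware response carrying a segment count of 3. */
        uint8_t resp[4] = { 0x00, 0x00, 0x03, 0x00 };
        uint16_t segs = read_le16(resp + 2);

        printf("segs=%u\n", segs);      /* 3, regardless of endianness */
        return 0;
}
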
/linux/include/uapi/linux/
elf-fdpic.h
30 struct elf32_fdpic_loadseg segs[]; member
45 struct elf64_fdpic_loadseg segs[]; member
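
In elf-fdpic.h, segs[] is a C99 flexible array member: the load map is a single allocation whose fixed header is immediately followed by a variable number of segment records. A minimal userspace sketch of sizing and filling such a structure (field layout simplified from the UAPI header):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct loadseg {
        uint32_t addr;          /* simplified stand-in fields */
        uint32_t p_vaddr;
        uint32_t p_memsz;
};

struct loadmap {
        uint16_t version;
        uint16_t nsegs;
        struct loadseg segs[];  /* flexible array member */
};

int main(void)
{
        uint16_t n = 3;
        struct loadmap *m = malloc(sizeof(*m) + n * sizeof(m->segs[0]));

        if (!m)
                return 1;
        m->version = 0;
        m->nsegs = n;
        m->segs[n - 1].p_memsz = 4096;  /* header and records share one block */
        printf("%u segs in one %zu-byte allocation\n",
               (unsigned)m->nsegs, sizeof(*m) + n * sizeof(m->segs[0]));
        free(m);
        return 0;
}
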
/linux/include/rdma/
rdmavt_mr.h
28 struct rvt_seg segs[RVT_SEGSZ]; member
133 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in rvt_update_sge()
134 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in rvt_update_sge()
/linux/net/xfrm/
xfrm_output.c
620 struct sk_buff *segs, *nskb; in xfrm_output_gso() local
624 segs = skb_gso_segment(skb, 0); in xfrm_output_gso()
626 if (IS_ERR(segs)) in xfrm_output_gso()
627 return PTR_ERR(segs); in xfrm_output_gso()
628 if (segs == NULL) in xfrm_output_gso()
631 skb_list_walk_safe(segs, segs, nskb) { in xfrm_output_gso()
634 skb_mark_not_on_list(segs); in xfrm_output_gso()
635 err = xfrm_output2(net, sk, segs); in xfrm_output_gso()
xfrm_device.c
162 struct sk_buff *segs; in validate_xmit_xfrm() local
167 segs = skb_gso_segment(skb, esp_features); in validate_xmit_xfrm()
168 if (IS_ERR(segs)) { in validate_xmit_xfrm()
174 skb = segs; in validate_xmit_xfrm()
/linux/drivers/net/wireguard/
device.c
179 struct sk_buff *segs = skb_gso_segment(skb, 0); in wg_xmit() local
181 if (IS_ERR(segs)) { in wg_xmit()
182 ret = PTR_ERR(segs); in wg_xmit()
186 skb = segs; in wg_xmit()
/linux/include/net/
udp.h
588 struct sk_buff *segs; in udp_rcv_segment() local
610 segs = __skb_gso_segment(skb, features, false); in udp_rcv_segment()
611 if (IS_ERR_OR_NULL(segs)) { in udp_rcv_segment()
621 return segs; in udp_rcv_segment()
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
267 for (seg = 0; seg < sg->segs; seg++, seg_addr++) in otx2_free_rcv_seg()
333 if (cqe->sg.segs) in otx2_check_rcv_errors()
370 for (seg = 0; seg < sg->segs; seg++, seg_addr++) { in otx2_rcv_pkt_handler()
645 sg->segs = 0; in otx2_sqe_add_sg()
661 sg->segs++; in otx2_sqe_add_sg()
879 sg->segs = 0; in otx2_sqe_tso_add_sg()
892 sg->segs++; in otx2_sqe_tso_add_sg()
1253 if (cqe->sg.segs > 1) { in otx2_cleanup_rx_cqes()
1375 sg->segs = 1; in otx2_xdp_sqe_add_sg()
/linux/arch/um/drivers/
ubd_kern.c
1265 int segs = 0; in ubd_submit_request() local
1271 segs = 0; in ubd_submit_request()
1273 segs = 1; in ubd_submit_request()
1275 segs = blk_rq_nr_phys_segments(req); in ubd_submit_request()
1277 io_req = ubd_alloc_req(dev, req, segs); in ubd_submit_request()
1281 io_req->desc_cnt = segs; in ubd_submit_request()
1282 if (segs) in ubd_submit_request()
