/linux/net/ipv6/

  esp6.c
      64  static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)  in esp_alloc_tmp() argument
      81  len += sizeof(struct scatterlist) * nfrags;  in esp_alloc_tmp()
     474  int nfrags;  in esp6_output_head() local
     493  nfrags = 1;  in esp6_output_head()
     522  nfrags = skb_shinfo(skb)->nr_frags;  in esp6_output_head()
     524  __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,  in esp6_output_head()
     526  skb_shinfo(skb)->nr_frags = ++nfrags;  in esp6_output_head()
     532  nfrags++;  in esp6_output_head()
     547  nfrags = skb_cow_data(skb, tailen, &trailer);  in esp6_output_head()
     548  if (nfrags < 0)  in esp6_output_head()
          [all …]

  ah6.c
      49  static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,  in ah_alloc_tmp() argument
      61  len += sizeof(struct scatterlist) * nfrags;  in ah_alloc_tmp()
     319  int nfrags;  in ah6_output() local
     343  nfrags = err;  in ah6_output()
     355  iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +  in ah6_output()
     365  seqhisg = sg + nfrags;  in ah6_output()
     409  sg_init_table(sg, nfrags + sglists);  in ah6_output()
     520  int nfrags;  in ah6_input() local
     555  nfrags = err;  in ah6_input()
     567  work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +  in ah6_input()
          [all …]
/linux/net/ipv4/

  esp4.c
      47  static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)  in esp_alloc_tmp() argument
      64  len += sizeof(struct scatterlist) * nfrags;  in esp_alloc_tmp()
     444  int nfrags;  in esp_output_head() local
     464  nfrags = 1;  in esp_output_head()
     493  nfrags = skb_shinfo(skb)->nr_frags;  in esp_output_head()
     495  __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,  in esp_output_head()
     497  skb_shinfo(skb)->nr_frags = ++nfrags;  in esp_output_head()
     503  nfrags++;  in esp_output_head()
     516  nfrags = skb_cow_data(skb, tailen, &trailer);  in esp_output_head()
     517  if (nfrags < 0)  in esp_output_head()
          [all …]

  ah4.c
      25  static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,  in ah_alloc_tmp() argument
      37  len += sizeof(struct scatterlist) * nfrags;  in ah_alloc_tmp()
     147  int nfrags;  in ah_output() local
     167  nfrags = err;  in ah_output()
     178  iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);  in ah_output()
     185  seqhisg = sg + nfrags;  in ah_output()
     221  sg_init_table(sg, nfrags + sglists);  in ah_output()
     305  int nfrags;  in ah_input() local
     354  nfrags = err;  in ah_input()
     365  work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +  in ah_input()
          [all …]
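The esp_alloc_tmp()/ah_alloc_tmp() hits in the IPv4 and IPv6 entries above share one sizing idiom: a single temporary allocation holds the per-request extra data plus one struct scatterlist per fragment. Below is a minimal userspace sketch of that arithmetic; the struct layout and the alloc_tmp() helper are simplified stand-ins, and the kernel versions also reserve IV and crypto-request state that this sketch lumps into extralen.

```c
/*
 * Sketch of the sizing idiom from esp_alloc_tmp()/ah_alloc_tmp():
 * extra header space plus one scatterlist entry per fragment in one
 * allocation.  The struct below is a stand-in, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

struct scatterlist {            /* stand-in for the kernel type */
	unsigned long page_link;
	unsigned int  offset;
	unsigned int  length;
	unsigned long dma_address;
};

static void *alloc_tmp(int nfrags, int extralen)
{
	size_t len = extralen;

	len += sizeof(struct scatterlist) * nfrags;   /* SG table for all frags */
	return malloc(len);
}

int main(void)
{
	int nfrags = 3, extralen = 16;
	void *tmp = alloc_tmp(nfrags, extralen);

	printf("%d frags + %d extra bytes -> %zu-byte temp buffer\n",
	       nfrags, extralen,
	       extralen + nfrags * sizeof(struct scatterlist));
	free(tmp);
	return 0;
}
```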
/linux/drivers/net/ethernet/pasemi/

  pasemi_mac.c
     243  const int nfrags,  in pasemi_mac_unmap_tx_skb() argument
     252  for (f = 0; f < nfrags; f++) {  in pasemi_mac_unmap_tx_skb()
     263  return (nfrags + 3) & ~1;  in pasemi_mac_unmap_tx_skb()
     514  int freed, nfrags;  in pasemi_mac_free_tx_resources() local
     527  nfrags = skb_shinfo(info->skb)->nr_frags;  in pasemi_mac_free_tx_resources()
     528  for (j = 0; j <= nfrags; j++)  in pasemi_mac_free_tx_resources()
     531  freed = pasemi_mac_unmap_tx_skb(mac, nfrags,  in pasemi_mac_free_tx_resources()
    1350  const int nfrags = skb_shinfo(skb)->nr_frags;  in pasemi_mac_queue_csdesc() local
    1384  for (i = 1; i <= nfrags; i++)  in pasemi_mac_queue_csdesc()
    1436  int i, nfrags;  in pasemi_mac_start_tx() local
          [all …]
/linux/drivers/target/iscsi/cxgbit/

  cxgbit_main.c
     229  for (i = 1; i < gl->nfrags; i++)  in cxgbit_copy_frags()
     235  skb_shinfo(skb)->nr_frags += gl->nfrags;  in cxgbit_copy_frags()
     238  get_page(gl->frags[gl->nfrags - 1].page);  in cxgbit_copy_frags()
     260  if (unlikely(gl->nfrags > 1))  in cxgbit_lro_add_packet_gl()
     272  pdu_cb->nr_dfrags = gl->nfrags;  in cxgbit_lro_add_packet_gl()
     288  if (unlikely(gl->nfrags > 1))  in cxgbit_lro_add_packet_gl()
     308  pdu_cb->frags += gl->nfrags;  in cxgbit_lro_add_packet_gl()
     413  if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >  in cxgbit_lro_receive()
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

  sge.c
     310  unsigned int nfrags = skb_shinfo(skb)->nr_frags;  in unmap_sgl() local
     318  nfrags--;  in unmap_sgl()
     325  for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {  in unmap_sgl()
     354  if (nfrags) {  in unmap_sgl()
     908  unsigned int nfrags = si->nr_frags;  in write_sgl() local
     915  nfrags++;  in write_sgl()
     922  ULPTX_NSGE_V(nfrags));  in write_sgl()
     923  if (likely(--nfrags == 0))  in write_sgl()
     932  for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {  in write_sgl()
     938  if (nfrags) {  in write_sgl()
          [all …]

  adapter.h
     156  unsigned int nfrags;   /* # of fragments */  member
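In the write_sgl() hits above, nfrags starts from si->nr_frags and gets one extra increment (for the skb's linear data, per the full source; the guard is not visible in this excerpt) before being written into the ULPTX_NSGE field, so the field counts every DMA segment. A hedged sketch of that counting, with plain integers standing in for the skb, follows.

```c
/*
 * Sketch of the write_sgl() entry count: one gather entry per page
 * fragment, plus one for the skb's linear area when it still holds
 * payload past "start".  Plain integers stand in for the skb here.
 */
#include <stdio.h>

static unsigned int count_sgl_entries(unsigned int headlen,
				      unsigned int start,
				      unsigned int nr_frags)
{
	unsigned int nfrags = nr_frags;

	if (headlen > start)       /* linear data left -> extra entry */
		nfrags++;
	return nfrags;
}

int main(void)
{
	/* 128-byte linear area, 64 bytes already consumed, 3 page frags */
	printf("SGL entries: %u\n", count_sgl_entries(128, 64, 3)); /* 4 */
	return 0;
}
```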
/linux/drivers/net/ethernet/fungible/funeth/

  funeth_tx.c
     516  unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;  in fun_xdp_tx() local
     528  nfrags += si->nr_frags;  in fun_xdp_tx()
     529  ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *  in fun_xdp_tx()
     554  req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);  in fun_xdp_tx()
     556  fun_write_gl(q, req, dma, lens, nfrags);  in fun_xdp_tx()
     565  trace_funeth_tx(q, tot_len, idx, nfrags);  in fun_xdp_tx()
/linux/drivers/net/ethernet/google/gve/

  gve_tx.c
      89  int nfrags = 0;  in gve_tx_alloc_fifo() local
     103  nfrags++;  in gve_tx_alloc_fifo()
     113  nfrags++;  in gve_tx_alloc_fifo()
     125  iov[nfrags - 1].iov_padding = padding;  in gve_tx_alloc_fifo()
     132  return nfrags;  in gve_tx_alloc_fifo()
     789  int pad, nfrags, ndescs, iovi, offset;  in gve_tx_fill_xdp() local
     801  nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,  in gve_tx_fill_xdp()
     804  ndescs = nfrags - iovi;  in gve_tx_fill_xdp()
     807  while (iovi < nfrags) {  in gve_tx_fill_xdp()
/linux/drivers/net/wireless/mediatek/mt76/

  testmode.c
     111  int nfrags, i;  in mt76_testmode_alloc_skb() local
     119  nfrags = len / MT_TXP_MAX_LEN;  in mt76_testmode_alloc_skb()
     120  head_len = nfrags ? MT_TXP_MAX_LEN : len;  in mt76_testmode_alloc_skb()
     146  for (i = 0; i < nfrags; i++) {  in mt76_testmode_alloc_skb()
     150  if (i == nfrags - 1)  in mt76_testmode_alloc_skb()
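mt76_testmode_alloc_skb() above sizes a test frame as a head buffer of at most MT_TXP_MAX_LEN bytes plus len / MT_TXP_MAX_LEN chained fragments. The sketch below only reproduces that arithmetic for an arbitrary length; MAX_CHUNK is an illustrative stand-in value, not the driver's constant.

```c
/*
 * Sketch of the head/fragment split used in mt76_testmode_alloc_skb():
 * nfrags = len / MAX_CHUNK extra fragments behind a head buffer that is
 * either MAX_CHUNK (when fragments are needed) or the whole frame.
 * MAX_CHUNK is an assumed stand-in for MT_TXP_MAX_LEN.
 */
#include <stdio.h>

#define MAX_CHUNK 4096u        /* assumed chunk size, not the driver's value */

int main(void)
{
	unsigned int len = 10000;
	unsigned int nfrags = len / MAX_CHUNK;               /* 2 */
	unsigned int head_len = nfrags ? MAX_CHUNK : len;     /* 4096 */
	unsigned int frag_bytes = len - head_len;             /* 5904 over nfrags */

	printf("len=%u -> head=%u, %u extra frag(s) carrying %u bytes\n",
	       len, head_len, nfrags, frag_bytes);
	return 0;
}
```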
/linux/drivers/net/ethernet/pensando/ionic/

  ionic_txrx.c
     777  unsigned int nfrags;  in ionic_rx_fill() local
     809  nfrags = 0;  in ionic_rx_fill()
     833  nfrags++;  in ionic_rx_fill()
     861  nfrags++;  in ionic_rx_fill()
     868  desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :  in ionic_rx_fill()
     870  desc_info->nbufs = nfrags;  in ionic_rx_fill()
    1114  unsigned int nfrags;  in ionic_tx_map_skb() local
    1126  nfrags = skb_shinfo(skb)->nr_frags;  in ionic_tx_map_skb()
    1127  for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {  in ionic_tx_map_skb()
    1136  desc_info->nbufs = 1 + nfrags;  in ionic_tx_map_skb()
/linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/

  chcr_ktls.c
      28  u8 nfrags = 0, frag_idx = 0;  in chcr_get_nfrags_to_send() local
      51  nfrags++;  in chcr_get_nfrags_to_send()
      56  nfrags++;  in chcr_get_nfrags_to_send()
      59  return nfrags;  in chcr_get_nfrags_to_send()
    1100  u32 skb_offset, u32 nfrags,  in chcr_ktls_xmit_wr_complete() argument
    1116  flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);  in chcr_ktls_xmit_wr_complete()
    1278  u32 len16, wr_mid = 0, cipher_start, nfrags;  in chcr_ktls_xmit_wr_short() local
    1292  nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);  in chcr_ktls_xmit_wr_short()
    1296  flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;  in chcr_ktls_xmit_wr_short()
    1471  u32 wr_mid = 0, nfrags;  in chcr_ktls_tx_plaintxt() local
          [all …]
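chcr_get_nfrags_to_send(skb, skb_offset, data_len) above answers "how many fragments does this byte range touch", which then feeds the flit budget. The sketch below implements that idea over a plain array of fragment lengths rather than a real skb (whose linear area the kernel function also has to handle), so the helper name and types here are illustrative.

```c
/*
 * Sketch: count how many fragments a byte range [offset, offset + len)
 * touches, given only an array of fragment sizes.  This mirrors the idea
 * behind chcr_get_nfrags_to_send() but ignores the skb linear area.
 */
#include <stdio.h>

static unsigned int nfrags_to_send(const unsigned int *frag_len,
				   unsigned int nr_frags,
				   unsigned int offset, unsigned int len)
{
	unsigned int i, nfrags = 0;

	for (i = 0; i < nr_frags && len; i++) {
		unsigned int size = frag_len[i];

		if (offset >= size) {      /* range starts after this frag */
			offset -= size;
			continue;
		}
		size -= offset;            /* usable bytes in this frag */
		offset = 0;
		nfrags++;
		len -= (len < size) ? len : size;
	}
	return nfrags;
}

int main(void)
{
	unsigned int frags[] = { 1000, 1000, 1000 };

	/* bytes 500..2199 span frags 0, 1 and 2 */
	printf("frags touched: %u\n", nfrags_to_send(frags, 3, 500, 1700));
	return 0;
}
```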
/linux/drivers/net/ethernet/chelsio/cxgb/

  sge.c
    1118  unsigned int nfrags = skb_shinfo(skb)->nr_frags;  in compute_large_page_tx_descs() local
    1124  for (i = 0; nfrags--; i++) {  in compute_large_page_tx_descs()
    1166  unsigned int nfrags,  in write_large_page_tx_descs() argument
    1176  *gen, nfrags == 0 && *desc_len == 0);  in write_large_page_tx_descs()
    1209  nfrags = skb_shinfo(skb)->nr_frags;  in write_tx_descs() local
    1221  V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |  in write_tx_descs()
    1245  nfrags, q);  in write_tx_descs()
    1249  nfrags == 0);  in write_tx_descs()
    1256  for (i = 0; nfrags--; i++) {  in write_tx_descs()
    1274  nfrags, q);  in write_tx_descs()
          [all …]
/linux/include/net/

  esp.h
      39  int nfrags;  member
/linux/drivers/infiniband/hw/qib/

  qib_user_sdma.c
     821  int nfrags = 0;  in qib_user_sdma_queue_pkts() local
     890  nfrags++;  in qib_user_sdma_queue_pkts()
     991  if (nfrags) {  in qib_user_sdma_queue_pkts()
     994  nfrags, npages);  in qib_user_sdma_queue_pkts()
/linux/drivers/net/ethernet/faraday/

  ftgmac100.c
     728  unsigned int pointer, nfrags, len, i, j;  in ftgmac100_hard_start_xmit() local
     748  nfrags = skb_shinfo(skb)->nr_frags;  in ftgmac100_hard_start_xmit()
     785  if (nfrags == 0)  in ftgmac100_hard_start_xmit()
     794  for (i = 0; i < nfrags; i++) {  in ftgmac100_hard_start_xmit()
     811  if (i == (nfrags - 1))  in ftgmac100_hard_start_xmit()
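The ftgmac100_hard_start_xmit() hits show the usual shape of a fragment-aware transmit path: the linear head goes into the first descriptor, then one descriptor per page fragment, with the last one flagged as end-of-packet. A compact sketch of that loop follows; the descriptor layout and the queue_packet() helper are invented for illustration, not the driver's structures.

```c
/*
 * Sketch of a per-fragment TX descriptor loop in the style of
 * ftgmac100_hard_start_xmit(): head buffer first, then one descriptor
 * per fragment, marking the last one as end-of-packet.  The descriptor
 * struct and frag array are illustrative stand-ins for driver/skb types.
 */
#include <stdio.h>
#include <stdbool.h>

struct tx_desc {
	unsigned int len;
	bool last;              /* "last segment of this packet" bit */
};

static void queue_packet(struct tx_desc *ring, unsigned int head_len,
			 const unsigned int *frag_len, unsigned int nfrags)
{
	unsigned int i;

	ring[0].len = head_len;
	ring[0].last = (nfrags == 0);          /* no frags: head is the packet */

	for (i = 0; i < nfrags; i++) {
		ring[1 + i].len = frag_len[i];
		ring[1 + i].last = (i == nfrags - 1);
	}
}

int main(void)
{
	struct tx_desc ring[4] = { 0 };
	unsigned int frags[] = { 1448, 1448, 600 };

	queue_packet(ring, 66, frags, 3);
	for (int i = 0; i < 4; i++)
		printf("desc %d: len=%u last=%d\n", i, ring[i].len, ring[i].last);
	return 0;
}
```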
/linux/drivers/net/ethernet/chelsio/cxgb3/

  sge.c
     238  int nfrags, frag_idx, curflit, j = d->addr_idx;  in unmap_skb() local
     250  nfrags = skb_shinfo(skb)->nr_frags;  in unmap_skb()
     252  while (frag_idx < nfrags && curflit < WR_FLITS) {  in unmap_skb()
     265  if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */  in unmap_skb()
    1011  unsigned int i, j = 0, k = 0, nfrags;  in write_sgl() local
    1018  nfrags = skb_shinfo(skb)->nr_frags;  in write_sgl()
    1019  for (i = 0; i < nfrags; i++) {  in write_sgl()
    1030  return ((nfrags + (len != 0)) * 3) / 2 + j;  in write_sgl()
/linux/net/core/

  skbuff.c
    2615  int nfrags = skb_shinfo(skb)->nr_frags;  in ___pskb_trim() local
    2627  for (; i < nfrags; i++) {  in ___pskb_trim()
    2640  for (; i < nfrags; i++)  in ___pskb_trim()
    4015  const int nfrags = skb_shinfo(skb)->nr_frags;  in skb_split_no_header() local
    4022  for (i = 0; i < nfrags; i++) {  in skb_split_no_header()
    4620  int nfrags, pos;  in skb_segment() local
    4701  nfrags = skb_shinfo(head_skb)->nr_frags;  in skb_segment()
    4721  if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&  in skb_segment()
    4730  nfrags = skb_shinfo(list_skb)->nr_frags;  in skb_segment()
    4736  BUG_ON(i >= nfrags);  in skb_segment()
          [all …]

  pktgen.c
     286  int nfrags;  member
     585  pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,  in pktgen_if_show()
    1090  pkt_dev->nfrags = value;  in pktgen_if_write()
    1091  sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);  in pktgen_if_write()
    2779  if (pkt_dev->nfrags <= 0) {  in pktgen_finalize_skb()
    2782  int frags = pkt_dev->nfrags;  in pktgen_finalize_skb()
    3805  pkt_dev->nfrags = 0;  in pktgen_add_device()
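The skbuff.c helpers above all walk skb_shinfo()->nr_frags page fragments while tracking a running byte offset, for example to find where a trim or split lands. Below is a hedged sketch of that walk over plain length arrays: it reports which fragment a target byte position falls into and how far into that fragment, ignoring the frag_list chaining and everything else the real helpers also handle.

```c
/*
 * Sketch of the "walk the fragments with a running offset" pattern from
 * ___pskb_trim()/skb_split_no_header(): locate the fragment that contains
 * a given byte position.  Arrays of lengths stand in for skb frags.
 */
#include <stdio.h>

struct frag_pos {
	int frag;          /* index of the fragment containing the byte, -1 if in head */
	unsigned int off;  /* offset of that byte within the fragment */
};

static struct frag_pos find_frag(unsigned int headlen,
				 const unsigned int *frag_len, int nfrags,
				 unsigned int pos)
{
	struct frag_pos res = { -1, 0 };
	unsigned int end = headlen;
	int i;

	if (pos < headlen)
		return res;                     /* still in the linear head */

	for (i = 0; i < nfrags; i++) {
		if (pos < end + frag_len[i]) {
			res.frag = i;
			res.off = pos - end;
			break;
		}
		end += frag_len[i];
	}
	return res;
}

int main(void)
{
	unsigned int frags[] = { 4096, 4096, 2048 };
	struct frag_pos p = find_frag(128, frags, 3, 5000);

	printf("byte 5000 -> frag %d, offset %u\n", p.frag, p.off);  /* frag 1, 776 */
	return 0;
}
```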
/linux/fs/ufs/

  inode.c
     263  unsigned nfrags = uspi->s_fpb;  in ufs_inode_getfrag() local
     275  nfrags = (new_fragment & uspi->s_fpbmask) + 1;  in ufs_inode_getfrag()
     285  goal, nfrags, err, locked_folio);  in ufs_inode_getfrag()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

  ktls_tx.c
      18  mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,  in mlx5e_ktls_dumps_num_wqes() argument
      24  return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));  in mlx5e_ktls_dumps_num_wqes()
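mlx5e_ktls_dumps_num_wqes() above is a one-line budget: one work-queue entry per fragment plus enough entries to resend sync_len bytes at the hardware MTU, rounded up. The sketch below reproduces that formula with an open-coded DIV_ROUND_UP; the MTU value is an assumed example, not taken from the driver.

```c
/*
 * Sketch of the mlx5e_ktls_dumps_num_wqes() budget: nfrags WQEs plus
 * ceil(sync_len / mtu) WQEs for the resync data.  DIV_ROUND_UP is
 * open-coded; the MTU below is an assumed example, not a driver value.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned int dumps_num_wqes(unsigned int nfrags,
				   unsigned int sync_len, unsigned int mtu)
{
	return nfrags + DIV_ROUND_UP(sync_len, mtu);
}

int main(void)
{
	/* 3 frags, 4000 bytes to resync at an assumed 1500-byte MTU -> 3 + 3 */
	printf("WQEs needed: %u\n", dumps_num_wqes(3, 4000, 1500));
	return 0;
}
```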
/linux/drivers/net/ethernet/stmicro/stmmac/

  stmmac_main.c
    4267  int i, first_tx, nfrags;  in stmmac_tso_xmit() local
    4285  nfrags = skb_shinfo(skb)->nr_frags;  in stmmac_tso_xmit()
    4357  (nfrags == 0), queue);  in stmmac_tso_xmit()
    4377  for (i = 0; i < nfrags; i++) {  in stmmac_tso_xmit()
    4387  (i == nfrags - 1), queue);  in stmmac_tso_xmit()
    4443  u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);  in stmmac_tso_xmit()
    4479  tx_q->cur_tx, first, nfrags);  in stmmac_tso_xmit()
    4535  int nfrags = skb_shinfo(skb)->nr_frags;  in stmmac_xmit() local
    4567  if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {  in stmmac_xmit()
    4625  for (i = 0; i < nfrags; i++) {  in stmmac_xmit()
          [all …]
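stmmac_xmit() above (and mtk_star_netdev_start_xmit() in the next entry) gate transmission on having at least nfrags + 1 free descriptors: one for the linear head and one per fragment. A small sketch of that admission check follows; the ring bookkeeping is simplified to a pair of counters, whereas the real drivers also stop and wake the netdev queue.

```c
/*
 * Sketch of the "nfrags + 1 descriptors free?" admission check seen in
 * stmmac_xmit()/mtk_star_netdev_start_xmit().  The ring is reduced to a
 * size and an in-use counter for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

struct tx_ring {
	unsigned int size;     /* total descriptors */
	unsigned int used;     /* descriptors currently queued */
};

static bool can_queue_skb(const struct tx_ring *ring, unsigned int nfrags)
{
	unsigned int avail = ring->size - ring->used;

	return avail >= nfrags + 1;    /* head descriptor + one per fragment */
}

int main(void)
{
	struct tx_ring ring = { .size = 256, .used = 253 };

	printf("2-frag skb fits: %d\n", can_queue_skb(&ring, 2));  /* 1: needs 3 */
	printf("3-frag skb fits: %d\n", can_queue_skb(&ring, 3));  /* 0: needs 4 */
	return 0;
}
```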
/linux/drivers/net/ethernet/mediatek/

  mtk_star_emac.c
    1104  int nfrags = skb_shinfo(skb)->nr_frags;  in mtk_star_netdev_start_xmit() local
    1106  if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {  in mtk_star_netdev_start_xmit()
/linux/drivers/net/ethernet/ibm/ehea/

  ehea_main.c
    1646  int nfrags, sg1entry_contains_frag_data, i;  in write_swqe2_data() local
    1648  nfrags = skb_shinfo(skb)->nr_frags;  in write_swqe2_data()
    1656  if (nfrags > 0) {  in write_swqe2_data()
    1670  for (i = sg1entry_contains_frag_data; i < nfrags; i++) {  in write_swqe2_data()