/linux/Documentation/networking/

    driver.rst
        84: MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS))
        98: drv_tx_avail(dr), 2 * MAX_SKB_FRAGS);
|
/linux/net/core/

    hotdata.c
        23: .sysctl_max_skb_frags = MAX_SKB_FRAGS,

    datagram.c
        645: struct page *pages[MAX_SKB_FRAGS];  [in zerocopy_fill_skb_from_iter()]
        650: if (frag == MAX_SKB_FRAGS)  [in zerocopy_fill_skb_from_iter()]
        654: MAX_SKB_FRAGS - frag, &start);  [in zerocopy_fill_skb_from_iter()]
        724: if (i == MAX_SKB_FRAGS)  [in zerocopy_fill_skb_from_devmem()]

    skbuff.c
        955: if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)  [in skb_pp_cow_data()]
        986: for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {  [in skb_pp_cow_data()]
        2021: while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))  [in skb_copy_ubufs()]
        3139: if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))  [in __splice_segment()]
        3261: struct partial_page partial[MAX_SKB_FRAGS];  [in sendmsg_locked()]
        3262: struct page *pages[MAX_SKB_FRAGS];
        3266: .nr_pages_max = MAX_SKB_FRAGS,  [in sendmsg_unlocked()]
        3838: skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {  [in skb_zerocopy()]
        4319: (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))  [in skb_shift()]
        4326: if (to == MAX_SKB_FRAGS)  [in skb_shift()]
        [all ...]
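The net/core hits above share one pattern: a fill loop walks the payload and refuses to consume more than MAX_SKB_FRAGS page fragments. A minimal userspace sketch of that pattern (plain C, not kernel code; MAX_SKB_FRAGS and PAGE_SIZE are hard-coded here as assumptions, the kernel takes them from its configuration):

/* Sketch only: mimics the guard seen in zerocopy_fill_skb_from_iter(). */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE     4096UL   /* assumed page size */
#define MAX_SKB_FRAGS 17       /* assumed default; CONFIG_MAX_SKB_FRAGS can raise it */

/* Split len bytes into at most MAX_SKB_FRAGS page-sized fragments.
 * Returns the number of fragments used, or -1 if the data does not fit. */
static int fill_frags(size_t len, size_t frag_len[MAX_SKB_FRAGS])
{
        int frag = 0;

        while (len) {
                size_t chunk = len < PAGE_SIZE ? len : PAGE_SIZE;

                if (frag == MAX_SKB_FRAGS)      /* same guard as in datagram.c above */
                        return -1;
                frag_len[frag++] = chunk;
                len -= chunk;
        }
        return frag;
}

int main(void)
{
        size_t frags[MAX_SKB_FRAGS];

        /* 10 full pages plus a tail fit comfortably in 17 fragments. */
        printf("fragments used: %d\n", fill_frags(10 * PAGE_SIZE + 100, frags));
        return 0;
}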
/linux/drivers/scsi/cxgbi/

    libcxgbi.h
        81: min_t(u32, MAX_SKB_FRAGS << PAGE_SHIFT, CXGBI_ULP2_MAX_ISO_PAYLOAD)
        109: dma_addr_t addr[MAX_SKB_FRAGS + 1];
        384: #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
        544: struct page_frag frags[MAX_SKB_FRAGS];
|
/linux/drivers/target/iscsi/cxgbit/

    cxgbit_lro.h
        57: (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))

    cxgbit_main.c
        48: mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);  [in cxgbit_set_mdsl()]
        414: MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||  [in cxgbit_lro_receive()]
        415: (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {  [in cxgbit_lro_receive()]

    cxgbit.h
        54: min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
|
/linux/include/net/

    tls.h
        151: skb_frag_t frags[MAX_SKB_FRAGS];
        164: struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
|
/linux/drivers/net/xen-netback/

    netback.c
        462: for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;  [in xenvif_get_requests()]
        1041: if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {  [in xenvif_tx_build_gops()]
        1042: frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;  [in xenvif_tx_build_gops()]
        1043: BUG_ON(frag_overflow > MAX_SKB_FRAGS);  [in xenvif_tx_build_gops()]
        1044: skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;  [in xenvif_tx_build_gops()]
        1115: skb_frag_t frags[MAX_SKB_FRAGS];  [in xenvif_handle_frag_list()]
        1134: BUG_ON(i >= MAX_SKB_FRAGS);  [in xenvif_handle_frag_list()]
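The xenvif_tx_build_gops() hits show a clamp-and-remember pattern: when the frontend supplies more slots than MAX_SKB_FRAGS, the excess is counted and nr_frags is capped so the skb stays within the core limit, with the overflow handled separately. A standalone sketch of just that clamping step, using a stand-in struct rather than skb_shared_info:

/* Sketch only: cap a fragment count at MAX_SKB_FRAGS and report the excess. */
#include <stdio.h>

#define MAX_SKB_FRAGS 17        /* assumed default */

struct fake_shinfo {
        unsigned int nr_frags;
};

/* Clamp nr_frags to MAX_SKB_FRAGS; return how many fragments did not fit. */
static unsigned int clamp_frags(struct fake_shinfo *shinfo)
{
        unsigned int frag_overflow = 0;

        if (shinfo->nr_frags > MAX_SKB_FRAGS) {
                frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
                shinfo->nr_frags = MAX_SKB_FRAGS;
        }
        return frag_overflow;
}

int main(void)
{
        struct fake_shinfo shinfo = { .nr_frags = 20 };
        unsigned int overflow = clamp_frags(&shinfo);

        printf("kept %u frags, %u overflowed\n", shinfo.nr_frags, overflow);
        return 0;
}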
|
/linux/drivers/net/ethernet/fungible/funeth/

    funeth_tx.c
        155: unsigned int lens[MAX_SKB_FRAGS + 1];  [in write_pkt_desc()]
        156: dma_addr_t addrs[MAX_SKB_FRAGS + 1];  [in write_pkt_desc()]
        518: unsigned int lens[MAX_SKB_FRAGS + 1];  [in fun_xdp_tx()]
        519: dma_addr_t dma[MAX_SKB_FRAGS + 1];  [in fun_xdp_tx()]

    funeth_txrx.h
        20: #define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))
|
/linux/drivers/net/ethernet/cisco/enic/

    enic_wq.c
        78: (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {  [in enic_wq_service()]
|
/linux/drivers/net/ethernet/sfc/siena/

    tx_common.c
        408: unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;  [in efx_siena_tx_max_skb_descs()]
        416: max_descs += max_t(unsigned int, MAX_SKB_FRAGS,  [in efx_siena_tx_max_skb_descs()]
|
/linux/drivers/net/ethernet/qlogic/qede/

    qede_fp.c
        270: #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
        491: >= (MAX_SKB_FRAGS + 1))) {  [in qede_tx_int()]
        1503: WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));  [in qede_start_xmit()]
        1507: #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)  [in qede_start_xmit()]
        1720: < (MAX_SKB_FRAGS + 1))) {  [in qede_start_xmit()]
        1735: (MAX_SKB_FRAGS + 1)) &&  [in qede_start_xmit()]
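The #if at qede_fp.c:270 and 1507 compares the worst case a non-LSO packet can require, MAX_SKB_FRAGS + 2 buffer descriptors, against the hardware's per-packet limit. A sketch of the same worst-case bound written as a compile-time assertion instead of a preprocessor conditional; the limit value below is hypothetical, not the real ETH_TX_MAX_BDS_PER_NON_LSO_PACKET:

/* Sketch only: a build-time guard over the same bound the #if above checks. */
#include <assert.h>

#define MAX_SKB_FRAGS                           17    /* assumed default */
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET       19    /* hypothetical hardware limit */

static_assert(MAX_SKB_FRAGS + 2 <= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET,
              "a worst-case non-LSO packet needs more BDs than the hardware allows");

int main(void)
{
        return 0;
}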
|
/linux/include/scsi/

    fc_frame.h
        48: #define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1)
|
/linux/drivers/net/ethernet/freescale/fs_enet/

    fs_enet-main.c
        171: if (++fep->tx_free == MAX_SKB_FRAGS)  [in fs_enet_napi()]
        571: if (fep->tx_free < MAX_SKB_FRAGS)  [in fs_enet_start_xmit()]
        608: wake = fep->tx_free >= MAX_SKB_FRAGS &&  [in fs_timeout_work()]
|
/linux/drivers/net/ethernet/intel/iavf/

    iavf_txrx.h
        154: #define DESC_NEEDED (MAX_SKB_FRAGS + 6)
|
/linux/drivers/net/ethernet/intel/idpf/

    idpf_txrx.h
        35: #define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
        219: #define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
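DESC_NEEDED above and IDPF_MIN_TX_DESC_NEEDED here budget one descriptor per possible fragment plus a fixed overhead, so a driver can stop its queue while a worst-case skb still fits. A sketch of that budgeting idea under assumed values; the ring layout and the meaning of the six extra descriptors are illustrative, not taken from either driver:

/* Sketch only: worst-case TX descriptor budgeting around MAX_SKB_FRAGS. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS   17                       /* assumed default */
#define DESC_OVERHEAD   6                        /* assumed head/context overhead */
#define DESC_NEEDED     (MAX_SKB_FRAGS + DESC_OVERHEAD)

struct tx_ring {
        unsigned int size;   /* total descriptors in the ring */
        unsigned int used;   /* descriptors currently in flight */
};

/* True while a worst-case packet (every fragment populated) still fits. */
static bool tx_ring_has_room(const struct tx_ring *ring)
{
        return ring->size - ring->used >= DESC_NEEDED;
}

int main(void)
{
        struct tx_ring ring = { .size = 256, .used = 240 };

        printf("room for a worst-case skb: %s\n",
               tx_ring_has_room(&ring) ? "yes" : "no");
        return 0;
}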
|
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

    adapter.h
        154: struct page_frag frags[MAX_SKB_FRAGS];
|
/linux/drivers/net/ethernet/realtek/rtase/

    rtase.h
        258: #define RTASE_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
|
/linux/drivers/net/ethernet/pasemi/

    pasemi_mac.c
        514: dma_addr_t dmas[MAX_SKB_FRAGS+1];  [in pasemi_mac_free_tx_resources()]
        813: #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
        825: dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];  [in pasemi_mac_clean_tx()]
        1435: dma_addr_t map[MAX_SKB_FRAGS+1];  [in pasemi_mac_start_tx()]
        1436: unsigned int map_size[MAX_SKB_FRAGS+1];  [in pasemi_mac_start_tx()]
|
/linux/drivers/net/ethernet/qlogic/

    qla3xxx.h
        1016: #define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
        1040: struct map_list map[MAX_SKB_FRAGS+1];
|
/linux/drivers/net/

    tap.c
        553: if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))  [in tap_alloc_skb()]
        554: linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);  [in tap_alloc_skb()]
        613: if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)  [in tap_get_user()]
|
/linux/drivers/net/ethernet/xilinx/

    xilinx_axienet.h
        463: struct scatterlist sgl[MAX_SKB_FRAGS + 1];
|