/titanic_44/usr/src/uts/intel/io/amd8111s/

amd8111s_hw.h
    764  struct tx_desc {  [struct]
    945  struct tx_desc *TxDescQRead;   /* The next ring entry to be freed */
    946  struct tx_desc *TxDescQWrite;  /* The next free ring entry */
    947  struct tx_desc *TxDescQStart;  /* The start of the ring entries */
    948  struct tx_desc *TxDescQEnd;    /* The end of the ring entries */
    967  struct tx_desc *Tx_desc;

amd8111s_main.c
    831  length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;  in amd8111s_allocate_descriptors()
    860  pMil->Tx_desc = (struct tx_desc *)  in amd8111s_allocate_descriptors()
   1425  struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;  in amd8111s_tx_drain()
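
The four TxDescQ* pointers in amd8111s_hw.h describe a classic fixed-size descriptor ring: Start and End bound the array, Write is the next free slot, Read is the next slot to reclaim, and amd8111s_allocate_descriptors() over-allocates by ALIGNMENT so the ring base can be aligned after allocation. A minimal user-space sketch of that geometry; the pointer names come from the header, while TX_RING_SIZE's value and the helper functions are illustrative assumptions:

#include <stdio.h>
#include <stdlib.h>

#define TX_RING_SIZE 128    /* illustrative; the driver sizes this itself */

struct tx_desc { unsigned owned; };    /* stand-in for the hardware layout */

struct tx_ring {
    struct tx_desc *TxDescQStart;    /* first entry of the ring */
    struct tx_desc *TxDescQEnd;      /* one past the last entry */
    struct tx_desc *TxDescQRead;     /* next entry to be freed (reclaimed) */
    struct tx_desc *TxDescQWrite;    /* next free entry */
};

/* Advance a ring pointer, wrapping from End back to Start. */
static struct tx_desc *
ring_next(const struct tx_ring *r, struct tx_desc *p)
{
    return (++p == r->TxDescQEnd) ? r->TxDescQStart : p;
}

/* The ring is full when Write would catch up with Read. */
static int
ring_full(const struct tx_ring *r)
{
    return (ring_next(r, r->TxDescQWrite) == r->TxDescQRead);
}

int
main(void)
{
    struct tx_desc *ring = calloc(TX_RING_SIZE, sizeof (*ring));
    struct tx_ring r = { ring, ring + TX_RING_SIZE, ring, ring };

    printf("full? %d\n", ring_full(&r));    /* 0: freshly initialized */
    free(ring);
    return (0);
}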

/titanic_44/usr/src/uts/common/io/hxge/

hxge_send.c
     97  tx_desc_t tx_desc, *tmp_desc_p;  in hxge_start()  [local]
    308  tx_desc.value = 0;  in hxge_start()
    478  hpi_desc_handle, &tx_desc,  in hxge_start()
    514  tx_desc.value = 0;  in hxge_start()
    554  save_desc_p = &tx_desc;  in hxge_start()
    556  tmp_desc_p = &tx_desc;  in hxge_start()
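
Clearing tx_desc.value to 0 before filling fields suggests tx_desc_t is the usual union of a raw 64-bit word with per-field bitfields: one store resets every field, then the completed word is handed down via the descriptor handle. nxge_send.c, later in this listing, follows the identical pattern. A hedged model of the idiom; these field names and widths are invented for illustration, not the real hxge layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of a union descriptor; not the real hxge layout. */
typedef union _tx_desc_t {
    uint64_t value;          /* the raw 64-bit descriptor word */
    struct {
        uint64_t sad:44;     /* hypothetical buffer address bits */
        uint64_t tr_len:13;  /* hypothetical transfer length */
        uint64_t sop:1;      /* start of packet */
        uint64_t mark:1;     /* interrupt mark */
        uint64_t num_ptr:4;  /* gather count */
        uint64_t rsvd:1;
    } bits;
} tx_desc_t;

int
main(void)
{
    tx_desc_t tx_desc;

    tx_desc.value = 0;       /* clear every field in one store */
    tx_desc.bits.tr_len = 1514;
    tx_desc.bits.sop = 1;
    printf("raw descriptor word: 0x%016llx\n",
        (unsigned long long)tx_desc.value);
    return (0);
}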

/titanic_44/usr/src/uts/common/io/nge/

nge_main.c
    383  txbuffsize = ngep->tx_desc * ngep->buf_size;  in nge_alloc_bufs()
    385  txdescsize = ngep->tx_desc;  in nge_alloc_bufs()
    506  srp->desc.nslots = ngep->tx_desc;  in nge_init_send_ring()
   1019  ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;  in nge_get_props()
   1026  ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;  in nge_get_props()
   1033  ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;  in nge_get_props()
   1040  ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;  in nge_get_props()
   1047  ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;  in nge_get_props()
   1054  ngep->tx_desc = dev_param_p->tx_desc_num;  in nge_get_props()
   1636  ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;  in nge_m_setprop()
    [all …]

nge.h
    731  uint32_t tx_desc;  [member]
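
nge_get_props() chooses the send-ring slot count by frame class (2500-, 4500-, and 9000-byte jumbo constants, plus a low-memory fallback and a user override via tx_desc_num), and nge_alloc_bufs() then sizes the buffer pool as tx_desc * buf_size. A sketch of that selection; the slot-count values here are placeholders, not the driver's real constants:

#include <stdint.h>
#include <stdio.h>

/* Placeholder slot counts; the real NGE_SEND_*_SLOTS_DESC values differ. */
#define NGE_SEND_JB2500_SLOTS_DESC    1024
#define NGE_SEND_JB4500_SLOTS_DESC    512
#define NGE_SEND_JB9000_SLOTS_DESC    256
#define NGE_SEND_LOWMEM_SLOTS_DESC    128

static uint32_t
nge_tx_slots(size_t buf_size, int lowmem)
{
    if (lowmem)
        return (NGE_SEND_LOWMEM_SLOTS_DESC);
    if (buf_size <= 2500)
        return (NGE_SEND_JB2500_SLOTS_DESC);
    if (buf_size <= 4500)
        return (NGE_SEND_JB4500_SLOTS_DESC);
    return (NGE_SEND_JB9000_SLOTS_DESC);
}

int
main(void)
{
    size_t buf_size = 9000;
    uint32_t tx_desc = nge_tx_slots(buf_size, 0);

    /* Total transmit buffer memory, as in nge_alloc_bufs(). */
    size_t txbuffsize = (size_t)tx_desc * buf_size;
    printf("%u slots, %zu bytes of tx buffers\n", tx_desc, txbuffsize);
    return (0);
}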

/titanic_44/usr/src/uts/common/io/rge/

rge_main.c
    315  DMA_ZERO(rgep->tx_desc);  in rge_reinit_send_ring()
    329  DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);  in rge_reinit_send_ring()
    486  rgep->tx_desc = rgep->dma_area_txdesc;  in rge_init_send_ring()
    487  DMA_ZERO(rgep->tx_desc);  in rge_init_send_ring()
    488  rgep->tx_ring = rgep->tx_desc.mem_va;  in rge_init_send_ring()
    490  desc = rgep->tx_desc;  in rge_init_send_ring()
    511  DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);  in rge_init_send_ring()

rge.h
    440  dma_area_t tx_desc;  [member]

rge_rxtx.c
    651  DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);  in rge_send()

rge_chip.c
    994  val32 = rgep->tx_desc.cookie.dmac_laddress;  in rge_chip_init()
    996  val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;  in rge_chip_init()
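
rge keeps the ring in a dma_area_t: DMA_ZERO clears it, DMA_SYNC(..., DDI_DMA_SYNC_FORDEV) flushes it to the device, and rge_chip_init() programs the ring's physical address into the chip as the low and high 32-bit halves of cookie.dmac_laddress. A standalone sketch of that split; the register offsets are hypothetical and the register write is stubbed with printf:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 32-bit chip register write. */
static void
rge_reg_put32(unsigned reg, uint32_t val32)
{
    printf("reg 0x%02x <- 0x%08x\n", reg, val32);
}

int
main(void)
{
    /* Stand-in for rgep->tx_desc.cookie.dmac_laddress. */
    uint64_t dmac_laddress = 0x00000001fee1c000ULL;
    uint32_t val32;

    val32 = (uint32_t)dmac_laddress;          /* low 32 bits */
    rge_reg_put32(0x20, val32);               /* hypothetical offsets */
    val32 = (uint32_t)(dmac_laddress >> 32);  /* high 32 bits */
    rge_reg_put32(0x24, val32);
    return (0);
}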

/titanic_44/usr/src/uts/intel/io/dnet/

dnet.c
   1016  desc = &dnetp->tx_desc[current_desc];  in dnet_set_addr()
   1324  struct tx_desc_type *ring = dnetp->tx_desc;  in dnet_send()
   1868  struct tx_desc_type *descp = &(dnetp->tx_desc[index]);  in update_tx_stats()
   2152  if ((dnetp->tx_desc != NULL) &&  in dnet_alloc_bufs()
   2209  if (dnetp->tx_desc == NULL) {  in dnet_alloc_bufs()
   2213  (caddr_t *)&dnetp->tx_desc, &len,  in dnet_alloc_bufs()
   2218  NULL, (caddr_t)dnetp->tx_desc,  in dnet_alloc_bufs()
   2224  bzero(dnetp->tx_desc, len);  in dnet_alloc_bufs()
   2307  if (dnetp->tx_desc != NULL) {  in dnet_free_bufs()
   2309  dnetp->tx_desc = NULL;  in dnet_free_bufs()
    [all …]

dnet.h
    376  struct tx_desc_type *tx_desc;  /* virtual addr of xmit desc */  [member]
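
dnet_alloc_bufs() allocates the transmit descriptor array only when tx_desc is still NULL, zeroes it with bzero(), and dnet_free_bufs() frees it and resets the pointer so a second call is harmless. A user-space model of that idempotent lifecycle, with malloc/free standing in for the DDI allocator the driver actually uses:

#include <stdlib.h>
#include <strings.h>

struct tx_desc_type { unsigned desc0, desc1, buffer1, buffer2; };

struct dnetinstance {
    struct tx_desc_type *tx_desc;    /* virtual addr of xmit desc */
    int max_tx_desc;
};

static int
dnet_alloc_bufs(struct dnetinstance *dnetp)
{
    size_t len = dnetp->max_tx_desc * sizeof (struct tx_desc_type);

    if (dnetp->tx_desc == NULL) {       /* allocate only once */
        dnetp->tx_desc = malloc(len);
        if (dnetp->tx_desc == NULL)
            return (-1);
        bzero(dnetp->tx_desc, len);     /* descriptors start clean */
    }
    return (0);
}

static void
dnet_free_bufs(struct dnetinstance *dnetp)
{
    if (dnetp->tx_desc != NULL) {
        free(dnetp->tx_desc);
        dnetp->tx_desc = NULL;          /* safe to call again */
    }
}

int
main(void)
{
    struct dnetinstance dn = { NULL, 128 };

    (void) dnet_alloc_bufs(&dn);
    dnet_free_bufs(&dn);
    dnet_free_bufs(&dn);                /* second free is a no-op */
    return (0);
}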

/titanic_44/usr/src/uts/common/io/dmfe/

dmfe_main.c
    448  descp = &dmfep->tx_desc;  in dmfe_init_rings()
    942  descp = &dmfep->tx_desc;  in dmfe_reclaim_tx_desc()
   1062  descp = &dmfep->tx_desc;  in dmfe_send_msg()
   1246  descp = &dmfep->tx_desc;  in dmfe_update_hash()
   1378  dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,  in dmfe_m_unicst()
   2320  &dmfep->tx_desc);  in dmfe_alloc_bufs()
   2402  dmfe_free_dma_mem(&dmfep->tx_desc);  in dmfe_free_bufs()
   2700  bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);  in dmfe_attach()

dmfe_impl.h
    173  dma_area_t tx_desc;  /* transmit descriptors */  [member]
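
Every dmfe path above goes through the one shared descriptor area (descp = &dmfep->tx_desc); dmfe_reclaim_tx_desc() is the consumer side. Reclaim in this family of drivers typically walks from the oldest outstanding slot toward the producer, stopping at the first descriptor the hardware still owns. A generic sketch under assumed names; the OWN bit and the next_busy/next_free indexes are illustrative, not dmfe's actual fields:

#include <stdint.h>
#include <stdio.h>

#define TX_SLOTS    8
#define DESC0_OWN   0x80000000u    /* illustrative ownership bit */

struct txd { uint32_t desc0; };

struct ring {
    struct txd desc[TX_SLOTS];
    unsigned next_busy;    /* oldest descriptor given to hardware */
    unsigned next_free;    /* next descriptor software may fill */
};

/* Reclaim every completed descriptor; returns how many were freed. */
static unsigned
reclaim_tx_desc(struct ring *r)
{
    unsigned n = 0;

    while (r->next_busy != r->next_free) {
        if (r->desc[r->next_busy].desc0 & DESC0_OWN)
            break;    /* hardware still owns this one */
        r->next_busy = (r->next_busy + 1) % TX_SLOTS;
        n++;
    }
    return (n);
}

int
main(void)
{
    struct ring r = { .next_busy = 0, .next_free = 3 };

    r.desc[2].desc0 = DESC0_OWN;    /* slot 2 not yet completed */
    printf("reclaimed %u of 3\n", reclaim_tx_desc(&r));    /* 2 */
    return (0);
}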

/titanic_44/usr/src/uts/common/io/ntxn/

unm_gem.c
    702  int i, ring, tx_desc, rx_desc, rx_jdesc, maxrx;  in unm_check_options()  [local]
    726  tx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,  in unm_check_options()
    728  if (tx_desc >= 256 && tx_desc <= MAX_CMD_DESCRIPTORS && ISP2(tx_desc)) {  in unm_check_options()
    729  adapter->MaxTxDescCount = tx_desc;  in unm_check_options()
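
unm_check_options() accepts the tx-desc tunable read by ddi_prop_get_int() only when it is within range and a power of two (ISP2). The same check, standalone; MAX_CMD_DESCRIPTORS gets a placeholder value and ISP2 is spelled out:

#include <stdio.h>

#define MAX_CMD_DESCRIPTORS    4096    /* placeholder value */

/* ISP2: a value with at most one bit set, i.e. a power of two. */
#define ISP2(x)    (((x) & ((x) - 1)) == 0)

static int
check_tx_desc_option(int tx_desc, int dflt)
{
    if (tx_desc >= 256 && tx_desc <= MAX_CMD_DESCRIPTORS && ISP2(tx_desc))
        return (tx_desc);    /* accept the tunable */
    return (dflt);           /* silently fall back to the default */
}

int
main(void)
{
    printf("%d\n", check_tx_desc_option(1024, 512));    /* 1024 */
    printf("%d\n", check_tx_desc_option(1000, 512));    /* not a power of 2: 512 */
    printf("%d\n", check_tx_desc_option(8192, 512));    /* out of range: 512 */
    return (0);
}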

/titanic_44/usr/src/uts/common/io/nxge/

nxge_send.c
    145  tx_desc_t tx_desc, *tmp_desc_p;  in nxge_start()  [local]
    480  tx_desc.value = 0;  in nxge_start()
    673  &tx_desc,  in nxge_start()
    717  tx_desc.value = 0;  in nxge_start()
    766  save_desc_p = &tx_desc;  in nxge_start()
    768  tmp_desc_p = &tx_desc;  in nxge_start()

/titanic_44/usr/src/uts/common/io/e1000g/

e1000g_tx.c
   1651  struct e1000_tx_desc *tx_desc;  in e1000g_82547_tx_move_tail_work()  [local]
   1663  tx_desc = &(tx_ring->tbd_first[hw_tdt]);  in e1000g_82547_tx_move_tail_work()
   1664  length += tx_desc->lower.flags.length;  in e1000g_82547_tx_move_tail_work()
   1665  eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;  in e1000g_82547_tx_move_tail_work()
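
The 82547 tail-move workaround walks descriptors from the hardware tail, accumulating each descriptor's length until one carries the E1000_TXD_CMD_EOP bit, thereby measuring the whole frame even when it spans several descriptors. A reduced model of that walk; the descriptor is collapsed to a single "lower" word holding the command bits and a 16-bit length:

#include <stdint.h>
#include <stdio.h>

#define E1000_TXD_CMD_EOP    0x01000000u    /* end-of-packet command bit */
#define RING_SLOTS           8

/* Reduced model: the real e1000 descriptor packs length and cmd in 'lower'. */
struct e1000_tx_desc { uint32_t lower; };    /* cmd bits | 16-bit length */

static uint32_t
frame_length(struct e1000_tx_desc *tbd_first, unsigned hw_tdt)
{
    uint32_t length = 0, eop = 0;

    do {
        struct e1000_tx_desc *tx_desc = &tbd_first[hw_tdt];

        length += tx_desc->lower & 0xffff;    /* per-buffer length */
        eop = tx_desc->lower & E1000_TXD_CMD_EOP;
        hw_tdt = (hw_tdt + 1) % RING_SLOTS;
    } while (!eop);    /* the sample ring below guarantees an EOP */

    return (length);
}

int
main(void)
{
    struct e1000_tx_desc ring[RING_SLOTS] = {
        { 600 }, { 600 }, { E1000_TXD_CMD_EOP | 314 },
    };

    printf("frame is %u bytes\n", frame_length(ring, 0));    /* 1514 */
    return (0);
}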

/titanic_44/usr/src/uts/common/io/cxgbe/t4nex/

adapter.h
    130  struct tx_desc {  [struct]
    218  struct tx_desc *desc;  /* KVA of descriptor ring */

t4_sge.c
   2809  offset * sizeof (struct tx_desc), 0,  in ring_tx_db()
   2812  0, eq->pidx * sizeof (struct tx_desc),  in ring_tx_db()
   2816  (eq->pidx - eq->pending) * sizeof (struct tx_desc),  in ring_tx_db()
   2817  eq->pending * sizeof (struct tx_desc),  in ring_tx_db()
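
ring_tx_db() syncs only the descriptors queued since the last doorbell, turning ring indexes into a byte (offset, length) pair by multiplying with sizeof (struct tx_desc); when pidx has wrapped, the pending range is split into a tail sync and a head sync, which is what the two offset expressions above encode. A sketch of that arithmetic with the DDI sync call stubbed out:

#include <stdio.h>

struct tx_desc { char raw[64]; };    /* descriptors are fixed-size slots */

/* Stand-in for ddi_dma_sync(handle, offset, length, DDI_DMA_SYNC_FORDEV). */
static void
sync_for_dev(size_t offset, size_t length)
{
    printf("sync offset=%zu length=%zu\n", offset, length);
}

static void
sync_pending(unsigned cap, unsigned pidx, unsigned pending)
{
    if (pending > pidx) {
        /* Wrapped: sync the tail of the ring, then the head. */
        unsigned offset = pidx + cap - pending;
        sync_for_dev(offset * sizeof (struct tx_desc),
            (cap - offset) * sizeof (struct tx_desc));
        sync_for_dev(0, pidx * sizeof (struct tx_desc));
    } else {
        sync_for_dev((pidx - pending) * sizeof (struct tx_desc),
            pending * sizeof (struct tx_desc));
    }
}

int
main(void)
{
    sync_pending(1024, 10, 4);    /* contiguous: one sync */
    sync_pending(1024, 2, 5);     /* wrapped: two syncs */
    return (0);
}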

/titanic_44/usr/src/uts/common/io/bge/

bge_impl.h
    779  dma_area_t tx_desc;  /* transmit descriptors */  [member]

bge_kstats.c
    420  (knp++)->value.ui64 = bgep->tx_desc.cookie.dmac_laddress;  in bge_driverinfo_update()

bge_main2.c
   2767  DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
   2959  area = bgep->tx_desc;
   3036  bge_free_dma_mem(&bgep->tx_desc);

bge_chip2.c
   6089  areap = &bgep->tx_desc;  in bge_pp_ioctl()
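
bge, like rge and dmfe, wraps each DMA allocation in a dma_area_t pairing the kernel virtual mapping with its DMA cookie, which is why bge_kstats.c can export tx_desc.cookie.dmac_laddress directly. A reduced model of that bookkeeping struct, keeping only the fields these snippets touch; the actual illumos definitions carry more members:

#include <stdint.h>
#include <stdio.h>

/* Reduced model of the illumos ddi_dma_cookie_t. */
typedef struct {
    uint64_t dmac_laddress;    /* device-visible (physical/IOVA) address */
    size_t dmac_size;
} ddi_dma_cookie_t;

/* Reduced model of the drivers' dma_area_t bookkeeping struct. */
typedef struct {
    void *mem_va;              /* kernel virtual address */
    size_t alength;            /* allocated length */
    ddi_dma_cookie_t cookie;   /* first (only) DMA cookie */
} dma_area_t;

int
main(void)
{
    static char backing[512 * 16];    /* pretend descriptor memory */
    dma_area_t tx_desc = {
        .mem_va = backing,
        .alength = sizeof (backing),
        .cookie = { 0xfee1c000ULL, sizeof (backing) },
    };

    /* What bge_driverinfo_update() exports through its kstat. */
    printf("tx ring device address: 0x%llx\n",
        (unsigned long long)tx_desc.cookie.dmac_laddress);
    return (0);
}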