/freebsd/sys/ofed/drivers/infiniband/ulp/ipoib/
H A D | ipoib_ib.c |
  276  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)  in ipoib_dma_map_tx() argument
  278  struct mbuf *mb = tx_req->mb;  in ipoib_dma_map_tx()
  279  u64 *mapping = tx_req->mapping;  in ipoib_dma_map_tx()
  295  tx_req->mb = mb = m_defrag(mb, M_NOWAIT);  in ipoib_dma_map_tx()
  322  void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)  in ipoib_dma_unmap_tx() argument
  324  struct mbuf *mb = tx_req->mb;  in ipoib_dma_unmap_tx()
  325  u64 *mapping = tx_req->mapping;  in ipoib_dma_unmap_tx()
  337  struct ipoib_tx_buf *tx_req;  in ipoib_ib_handle_tx_wc() local
  348  tx_req = &priv->tx_ring[wr_id];  in ipoib_ib_handle_tx_wc()
  350  ipoib_dma_unmap_tx(priv->ca, tx_req);  in ipoib_ib_handle_tx_wc()
  [all …]
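The ipoib_ib.c hits outline a lifecycle: a send buffer is DMA-mapped when it is posted, recorded in tx_ring[], and then looked up again by its work-request id (wr_id) in the completion handler so it can be unmapped and freed. Below is a minimal userspace sketch of that bookkeeping; names such as demo_tx_buf, demo_post and demo_handle_tx_wc are invented for the illustration, and only the ring/wr_id pattern mirrors the driver snippets above.

/*
 * Simplified sketch: "post" remembers the packet under its wr_id,
 * the completion handler resolves the wr_id back to the slot and releases it.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_SENDQ_SIZE 64              /* kept a power of two */

struct demo_tx_buf {
	void     *pkt;                  /* stands in for tx_req->mb */
	uint64_t  mapping;              /* stands in for tx_req->mapping[] */
};

static struct demo_tx_buf tx_ring[DEMO_SENDQ_SIZE];

/* Record the packet so the completion can find it by wr_id. */
static uint64_t demo_post(void *pkt, unsigned int tx_head)
{
	unsigned int wr_id = tx_head & (DEMO_SENDQ_SIZE - 1);
	struct demo_tx_buf *tx_req = &tx_ring[wr_id];

	tx_req->pkt = pkt;
	tx_req->mapping = (uint64_t)(uintptr_t)pkt;   /* pretend DMA address */
	return wr_id;
}

/* Completion handler: look the slot up again and release it. */
static void demo_handle_tx_wc(uint64_t wr_id)
{
	struct demo_tx_buf *tx_req = &tx_ring[wr_id];

	printf("completing wr_id %llu, mapping 0x%llx\n",
	    (unsigned long long)wr_id, (unsigned long long)tx_req->mapping);
	free(tx_req->pkt);
	tx_req->pkt = NULL;
}

int main(void)
{
	uint64_t wr_id = demo_post(malloc(128), /* tx_head */ 5);

	demo_handle_tx_wc(wr_id);
	return 0;
}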
H A D | ipoib_cm.c |
  598  struct ipoib_cm_tx_buf *tx_req,  in post_send() argument
  602  struct mbuf *mb = tx_req->mb;  in post_send()
  603  u64 *mapping = tx_req->mapping;  in post_send()
  620  struct ipoib_cm_tx_buf *tx_req;  in ipoib_cm_send() local
  648  tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];  in ipoib_cm_send()
  649  tx_req->mb = mb;  in ipoib_cm_send()
  650  if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,  in ipoib_cm_send()
  653  if (tx_req->mb)  in ipoib_cm_send()
  654  m_freem(tx_req->mb);  in ipoib_cm_send()
  658  if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {  in ipoib_cm_send()
  [all …]
H A D | ipoib.h |
  523  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max);
  524  void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
/freebsd/sys/ofed/drivers/infiniband/ulp/sdp/
H A D | sdp_tx.c |
   68  struct sdp_buf *tx_req;  in sdp_post_send() local
  123  tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];  in sdp_post_send()
  124  tx_req->mb = mb;  in sdp_post_send()
  133  tx_req->mapping[i] = addr;  in sdp_post_send()
  144  if (unlikely(tx_req->mb->m_flags & M_URG))  in sdp_post_send()
  152  sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE);  in sdp_post_send()
  155  m_freem(tx_req->mb);  in sdp_post_send()
  170  struct sdp_buf *tx_req;  in sdp_send_completion() local
  181  tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];  in sdp_send_completion()
  182  mb = tx_req->mb;  in sdp_send_completion()
  [all …]
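The sdp_tx.c hits suggest that the slot taken from tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)] stores the packet plus one DMA address per fragment in tx_req->mapping[]. The following sketch reproduces only that per-fragment fill; struct demo_frag, demo_tx_slot and the fake pointer-as-address "mapping" are stand-ins invented for the illustration, not the SDP structures.

/*
 * Sketch: record a packet under its sequence number and "map" each
 * fragment of the chain into the slot's mapping[] array.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TX_SIZE   16               /* ring size, power of two like SDP_TX_SIZE */
#define DEMO_MAX_FRAGS  8

struct demo_frag {                      /* plays the role of one mbuf in the chain */
	const void       *data;
	size_t            len;
	struct demo_frag *next;
};

struct demo_tx_slot {                   /* plays the role of struct sdp_buf */
	struct demo_frag *pkt;
	uint64_t          mapping[DEMO_MAX_FRAGS];
	int               nfrags;
};

static struct demo_tx_slot tx_ring[DEMO_TX_SIZE];

static struct demo_tx_slot *demo_post_send(struct demo_frag *pkt, uint32_t mseq)
{
	struct demo_tx_slot *tx_req = &tx_ring[mseq & (DEMO_TX_SIZE - 1)];
	int i = 0;

	tx_req->pkt = pkt;
	for (struct demo_frag *f = pkt; f != NULL && i < DEMO_MAX_FRAGS; f = f->next)
		tx_req->mapping[i++] = (uint64_t)(uintptr_t)f->data;  /* pretend DMA address */
	tx_req->nfrags = i;
	return tx_req;
}

int main(void)
{
	struct demo_frag f2 = { "payload", 7, NULL };
	struct demo_frag f1 = { "header", 6, &f2 };
	struct demo_tx_slot *slot = demo_post_send(&f1, 42);

	printf("mseq 42 -> slot %u, %d fragments mapped\n",
	    42u & (DEMO_TX_SIZE - 1), slot->nfrags);
	return 0;
}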
/freebsd/sys/contrib/dev/rtw89/
H A D | core.c |
  483  struct rtw89_core_tx_request *tx_req,  in rtw89_core_tx_update_ampdu_info() argument
  486  struct ieee80211_sta *sta = tx_req->sta;  in rtw89_core_tx_update_ampdu_info()
  487  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  in rtw89_core_tx_update_ampdu_info()
  488  struct sk_buff *skb = tx_req->skb;  in rtw89_core_tx_update_ampdu_info()
  520  struct rtw89_core_tx_request *tx_req)  in rtw89_core_tx_update_sec_key() argument
  527  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  in rtw89_core_tx_update_sec_key()
  528  struct sk_buff *skb = tx_req->skb;  in rtw89_core_tx_update_sec_key()
  588  struct rtw89_core_tx_request *tx_req,  in rtw89_core_get_mgmt_rate() argument
  591  struct sk_buff *skb = tx_req->skb;  in rtw89_core_get_mgmt_rate()
  604  if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)  in rtw89_core_get_mgmt_rate()
  [all …]
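In the rtw89 core.c hits, tx_req acts as a request object that bundles the frame (skb), the destination station and a descriptor-info block, with small update helpers each filling one aspect of desc_info before the request moves on. The sketch below shows only that shape; every member and helper name here is an illustrative guess, not a real rtw89 definition.

/*
 * Sketch: a tx request object plus two helpers that each populate one
 * slice of its descriptor info, loosely echoing the update_* hits above.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_desc_info {                 /* stands in for rtw89_tx_desc_info */
	bool     ampdu;
	unsigned ampdu_density;
	bool     use_sec_key;
};

struct demo_tx_request {                /* stands in for rtw89_core_tx_request */
	const char           *frame;    /* stands in for tx_req->skb */
	const char           *sta;      /* stands in for tx_req->sta */
	struct demo_desc_info desc_info;
};

/* Each helper reads only the request and updates part of desc_info. */
static void demo_tx_update_ampdu_info(struct demo_tx_request *tx_req)
{
	tx_req->desc_info.ampdu = (tx_req->sta != NULL);
	tx_req->desc_info.ampdu_density = tx_req->desc_info.ampdu ? 4 : 0;
}

static void demo_tx_update_sec_key(struct demo_tx_request *tx_req)
{
	tx_req->desc_info.use_sec_key = true;   /* pretend a pairwise key exists */
}

int main(void)
{
	struct demo_tx_request tx_req = { .frame = "data-frame", .sta = "sta0" };

	demo_tx_update_ampdu_info(&tx_req);
	demo_tx_update_sec_key(&tx_req);
	printf("ampdu=%d density=%u sec=%d\n", tx_req.desc_info.ampdu,
	    tx_req.desc_info.ampdu_density, tx_req.desc_info.use_sec_key);
	return 0;
}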
H A D | pci.c |
  1365  struct rtw89_core_tx_request *tx_req)  in rtw89_pci_tx_write()
  1369  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  in rtw89_pci_tx_write()
  1373  struct sk_buff *skb = tx_req->skb;  in rtw89_pci_tx_write()
  1432  struct rtw89_core_tx_request *tx_req)  in rtw89_pci_reset_trx_rings()
  1436  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  in rtw89_pci_reset_trx_rings()
  1440  struct sk_buff *skb = tx_req->skb;  in rtw89_pci_reset_trx_rings()
  1471  struct rtw89_core_tx_request *tx_req)  in rtw89_pci_reset_trx_rings()
  1482  return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
  1491  ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);  in rtw89_pci_ops_reset()
  1515  static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,  in rtw89_pci_enable_intr_lock()
  1201  rtw89_pci_txwd_submit(struct rtw89_dev * rtwdev, struct rtw89_pci_tx_ring * tx_ring, struct rtw89_pci_tx_wd * txwd, struct rtw89_core_tx_request * tx_req)  rtw89_pci_txwd_submit() argument
  1269  rtw89_pci_fwcmd_submit(struct rtw89_dev * rtwdev, struct rtw89_pci_tx_ring * tx_ring, struct rtw89_pci_tx_bd_32 * txbd, struct rtw89_core_tx_request * tx_req)  rtw89_pci_fwcmd_submit() argument
  1305  rtw89_pci_txbd_submit(struct rtw89_dev * rtwdev, struct rtw89_pci_tx_ring * tx_ring, struct rtw89_pci_tx_bd_32 * txbd, struct rtw89_core_tx_request * tx_req)  rtw89_pci_txbd_submit() argument
  1346  rtw89_pci_tx_write(struct rtw89_dev * rtwdev, struct rtw89_core_tx_request * tx_req, u8 txch)  rtw89_pci_tx_write() argument
  1389  rtw89_pci_ops_tx_write(struct rtw89_dev * rtwdev, struct rtw89_core_tx_request * tx_req)  rtw89_pci_ops_tx_write() argument
  [all …]
H A D | core.h |
  3464  int (*tx_write)(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req);
  5530  struct rtw89_core_tx_request *tx_req)
  5532  return rtwdev->hci.ops->tx_write(rtwdev, tx_req);
  4391  rtw89_hci_tx_write(struct rtw89_dev * rtwdev, struct rtw89_core_tx_request * tx_req)  rtw89_hci_tx_write() argument
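The core.h hits show that rtw89_hci_tx_write() is a thin wrapper that calls rtwdev->hci.ops->tx_write(rtwdev, tx_req), an ops-table dispatch that keeps the core bus-agnostic while a backend such as the pci.c rtw89_pci_ops_tx_write() supplies the real implementation. The sketch below reproduces only that dispatch shape with invented demo_* names.

/*
 * Sketch: a device carries an ops table; the core-side wrapper forwards the
 * tx request to whichever bus backend was registered.
 */
#include <stdio.h>

struct demo_dev;
struct demo_tx_request { const char *frame; };

struct demo_hci_ops {
	int (*tx_write)(struct demo_dev *dev, struct demo_tx_request *tx_req);
};

struct demo_dev {
	struct { const struct demo_hci_ops *ops; } hci;
};

/* Bus-specific backend, analogous in role to rtw89_pci_ops_tx_write(). */
static int demo_pci_tx_write(struct demo_dev *dev, struct demo_tx_request *tx_req)
{
	(void)dev;
	printf("pci backend writes %s\n", tx_req->frame);
	return 0;
}

static const struct demo_hci_ops demo_pci_ops = {
	.tx_write = demo_pci_tx_write,
};

/* Core-side wrapper, analogous in role to rtw89_hci_tx_write(). */
static inline int demo_hci_tx_write(struct demo_dev *dev,
				    struct demo_tx_request *tx_req)
{
	return dev->hci.ops->tx_write(dev, tx_req);
}

int main(void)
{
	struct demo_dev dev = { .hci = { .ops = &demo_pci_ops } };
	struct demo_tx_request tx_req = { .frame = "beacon" };

	return demo_hci_tx_write(&dev, &tx_req);
}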