
Searched refs:hwq (Results 1 – 25 of 28) sorted by relevance

/linux/drivers/ufs/core/
ufs-mcq.c
234 struct ufs_hw_queue *hwq; in ufshcd_mcq_memory_alloc() local
239 hwq = &hba->uhq[i]; in ufshcd_mcq_memory_alloc()
242 hwq->max_entries; in ufshcd_mcq_memory_alloc()
243 hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, in ufshcd_mcq_memory_alloc()
244 &hwq->sqe_dma_addr, in ufshcd_mcq_memory_alloc()
246 if (!hwq->sqe_dma_addr) { in ufshcd_mcq_memory_alloc()
251 cqe_size = sizeof(struct cq_entry) * hwq->max_entries; in ufshcd_mcq_memory_alloc()
252 hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, in ufshcd_mcq_memory_alloc()
253 &hwq->cqe_dma_addr, in ufshcd_mcq_memory_alloc()
255 if (!hwq->cqe_dma_addr) { in ufshcd_mcq_memory_alloc()
[all …]
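
The ufs-mcq.c hits show the per-queue allocation pattern: each hardware queue gets a DMA-coherent ring for submission queue entries (SQEs) and another for completion queue entries (CQEs), both sized by max_entries. A minimal sketch of that pattern, assuming a simplified queue struct; dmam_alloc_coherent() is the real device-managed API, everything else here is illustrative:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical, pared-down stand-in for struct ufs_hw_queue. */
struct demo_hw_queue {
	void *sqe_base_addr;
	dma_addr_t sqe_dma_addr;
	void *cqe_base_addr;
	dma_addr_t cqe_dma_addr;
	u32 max_entries;
};

static int demo_alloc_rings(struct device *dev, struct demo_hw_queue *hwq,
			    size_t sqe_size, size_t cqe_size)
{
	/* dmam_*: allocations are released automatically on driver detach */
	hwq->sqe_base_addr = dmam_alloc_coherent(dev,
						 sqe_size * hwq->max_entries,
						 &hwq->sqe_dma_addr, GFP_KERNEL);
	if (!hwq->sqe_base_addr)
		return -ENOMEM;

	hwq->cqe_base_addr = dmam_alloc_coherent(dev,
						 cqe_size * hwq->max_entries,
						 &hwq->cqe_dma_addr, GFP_KERNEL);
	if (!hwq->cqe_base_addr)
		return -ENOMEM;

	return 0;
}
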
ufshcd.c
470 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq); in ufshcd_add_command_trace() local
472 hwq_id = hwq->id; in ufshcd_add_command_trace()
2270 struct ufs_hw_queue *hwq) in ufshcd_send_command() argument
2290 spin_lock(&hwq->sq_lock); in ufshcd_send_command()
2291 dest = hwq->sqe_base_addr + hwq->sq_tail_slot; in ufshcd_send_command()
2293 ufshcd_inc_sq_tail(hwq); in ufshcd_send_command()
2294 spin_unlock(&hwq->sq_lock); in ufshcd_send_command()
2939 struct ufs_hw_queue *hwq = NULL; in ufshcd_queuecommand() local
2999 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_queuecommand()
3001 ufshcd_send_command(hba, tag, hwq); in ufshcd_queuecommand()
[all …]
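
The ufshcd_send_command() hit follows the classic submission-queue tail protocol: lock the queue, copy the SQE into the current tail slot, advance the tail with wraparound, unlock. A hedged sketch of that protocol with a hypothetical queue struct (in the real driver the tail advance also posts the new tail to the MCQ SQ doorbell register):

#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical queue; not the driver's struct ufs_hw_queue. */
struct demo_sq {
	spinlock_t sq_lock;
	u8 *sqe_base_addr;	/* ring of max_entries fixed-size slots */
	u32 sq_tail_slot;
	u32 max_entries;
	size_t sqe_size;
};

static void demo_send(struct demo_sq *q, const void *sqe)
{
	u8 *dest;

	spin_lock(&q->sq_lock);
	dest = q->sqe_base_addr + q->sq_tail_slot * q->sqe_size;
	memcpy(dest, sqe, q->sqe_size);
	/* wraparound increment, as in ufshcd_inc_sq_tail() */
	if (++q->sq_tail_slot == q->max_entries)
		q->sq_tail_slot = 0;
	spin_unlock(&q->sq_lock);
}
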
ufshcd-priv.h
73 struct ufs_hw_queue *hwq);
/linux/drivers/scsi/cxlflash/
main.c
164 struct hwq *hwq = get_hwq(afu, cmd->hwq_index); in cmd_complete() local
166 spin_lock_irqsave(&hwq->hsq_slock, lock_flags); in cmd_complete()
168 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); in cmd_complete()
196 static void flush_pending_cmds(struct hwq *hwq) in flush_pending_cmds() argument
198 struct cxlflash_cfg *cfg = hwq->afu->parent; in flush_pending_cmds()
203 list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) { in flush_pending_cmds()
240 static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg) in context_reset() argument
242 struct cxlflash_cfg *cfg = hwq->afu->parent; in context_reset()
249 dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq); in context_reset()
251 spin_lock_irqsave(&hwq->hsq_slock, lock_flags); in context_reset()
[all …]
common.h
196 struct hwq { struct
231 struct hwq hwqs[CXLFLASH_MAX_HWQS]; argument
233 int (*context_reset)(struct hwq *hwq);
255 static inline struct hwq *get_hwq(struct afu *afu, u32 index) in get_hwq()
superpipe.c
267 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); in afu_attach() local
291 val = hwq->ctx_hndl; in afu_attach()
298 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl)); in afu_attach()
1658 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); in cxlflash_afu_recover() local
1735 reg = readq_be(&hwq->ctrl_map->mbox_r); in cxlflash_afu_recover()
/linux/drivers/infiniband/hw/bnxt_re/
qplib_res.h
98 #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) argument
100 #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ argument
101 ((HWQ_CMP(hwq->prod, hwq)\
102 - HWQ_CMP(hwq->cons, hwq))\
103 & (hwq->max_elements - 1)))
201 struct bnxt_qplib_hwq *hwq; member
352 static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq) in bnxt_qplib_base_pg_size() argument
357 pbl = &hwq->pbl[PBL_LVL_0]; in bnxt_qplib_base_pg_size()
384 static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq, in bnxt_qplib_get_qe() argument
389 pg_num = (indx / hwq->qe_ppg); in bnxt_qplib_get_qe()
[all …]
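
HWQ_CMP() and HWQ_FREE_SLOTS() above are standard power-of-two ring arithmetic: indices are masked by (max_elements - 1) instead of taken modulo, and the free-slot count is the ring size minus the masked producer/consumer distance. A standalone check of the same arithmetic, with the macros adapted to plain integers (the test values are made up):

#include <assert.h>
#include <stdint.h>

#define CMP(idx, max)	((idx) & ((max) - 1))
#define FREE_SLOTS(prod, cons, max) \
	((max) - ((CMP(prod, max) - CMP(cons, max)) & ((max) - 1)))

int main(void)
{
	uint32_t max = 8;			/* must be a power of two */

	assert(CMP(9, max) == 1);		/* masking wraps the index */
	assert(FREE_SLOTS(0, 0, max) == 8);	/* empty ring */
	assert(FREE_SLOTS(5, 2, max) == 5);	/* three entries in flight */
	assert(FREE_SLOTS(10, 7, max) == 5);	/* same three, after wraparound */
	return 0;
}
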
qplib_fp.c
80 dev_dbg(&scq->hwq.pdev->dev, in __bnxt_qplib_add_flush_qp()
88 dev_dbg(&rcq->hwq.pdev->dev, in __bnxt_qplib_add_flush_qp()
147 qp->sq.hwq.prod = 0; in bnxt_qplib_clean_qp()
148 qp->sq.hwq.cons = 0; in bnxt_qplib_clean_qp()
150 qp->rq.hwq.prod = 0; in bnxt_qplib_clean_qp()
151 qp->rq.hwq.cons = 0; in bnxt_qplib_clean_qp()
241 struct bnxt_qplib_hwq *hwq = &nq->hwq; in clean_nq() local
247 spin_lock_bh(&hwq->lock); in clean_nq()
250 nq_ptr = (struct nq_base **)hwq->pbl_ptr; in clean_nq()
251 nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)]; in clean_nq()
[all …]
qplib_rcfw.c
239 struct bnxt_qplib_hwq *hwq = &cmdq->hwq; in __send_message_no_waiter() local
264 sw_prod = HWQ_CMP(hwq->prod, hwq); in __send_message_no_waiter()
265 cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL); in __send_message_no_waiter()
271 hwq->prod++; in __send_message_no_waiter()
275 cmdq_prod = hwq->prod; in __send_message_no_waiter()
290 struct bnxt_qplib_hwq *hwq; in __send_message() local
297 hwq = &cmdq->hwq; in __send_message()
303 spin_lock_bh(&hwq->lock); in __send_message()
305 free_slots = HWQ_FREE_SLOTS(hwq); in __send_message()
313 spin_unlock_bh(&hwq->lock); in __send_message()
[all …]
qplib_fp.h
101 struct bnxt_qplib_hwq hwq; member
249 struct bnxt_qplib_hwq hwq; member
361 static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq) in __bnxt_qplib_get_avail() argument
365 cons = hwq->cons; in __bnxt_qplib_get_avail()
366 prod = hwq->prod; in __bnxt_qplib_get_avail()
369 avail += hwq->depth; in __bnxt_qplib_get_avail()
376 struct bnxt_qplib_hwq *hwq; in bnxt_qplib_queue_full() local
379 hwq = &que->hwq; in bnxt_qplib_queue_full()
381 avail = hwq->cons - hwq->prod; in bnxt_qplib_queue_full()
382 if (hwq->cons <= hwq->prod) in bnxt_qplib_queue_full()
[all …]
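
__bnxt_qplib_get_avail() uses the complementary signed-difference convention: available room is cons - prod, and whenever the producer index is at or past the consumer (cons <= prod) the queue depth is added back. A standalone check of that wraparound handling, with plain integers in place of the hwq struct:

#include <assert.h>

/* Mirrors the cons/prod arithmetic in the __bnxt_qplib_get_avail() hit. */
static int get_avail(int cons, int prod, int depth)
{
	int avail = cons - prod;

	if (cons <= prod)
		avail += depth;
	return avail;
}

int main(void)
{
	assert(get_avail(10, 4, 16) == 6);	/* consumer ahead: direct difference */
	assert(get_avail(2, 12, 16) == 6);	/* producer wrapped: 2 - 12 + 16 */
	assert(get_avail(5, 5, 16) == 16);	/* equal indices: ring fully free */
	return 0;
}
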
qplib_sp.c
540 if (mrw->hwq.max_elements) in bnxt_qplib_free_mrw()
541 bnxt_qplib_free_hwq(res, &mrw->hwq); in bnxt_qplib_free_mrw()
605 if (mrw->hwq.max_elements) { in bnxt_qplib_dereg_mrw()
608 bnxt_qplib_free_hwq(res, &mrw->hwq); in bnxt_qplib_dereg_mrw()
633 if (mr->hwq.max_elements) in bnxt_qplib_reg_mr()
634 bnxt_qplib_free_hwq(res, &mr->hwq); in bnxt_qplib_reg_mr()
644 rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); in bnxt_qplib_reg_mr()
657 if (mr->hwq.level == PBL_LVL_MAX) { in bnxt_qplib_reg_mr()
663 level = mr->hwq.level; in bnxt_qplib_reg_mr()
664 req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_reg_mr()
[all …]
main.c
1013 mr_hwq = &mr->qplib_mr.hwq; in bnxt_re_fill_res_mr_entry()
1071 cq_hwq = &cq->qplib_cq.hwq; in bnxt_re_fill_res_cq_entry()
1766 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; in bnxt_re_alloc_res()
1774 rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; in bnxt_re_alloc_res()
1775 rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count; in bnxt_re_alloc_res()
2143 rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr; in bnxt_re_dev_init()
2144 rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count; in bnxt_re_dev_init()
ib_verbs.c
2693 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; in bnxt_re_build_reg_wqe()
2694 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; in bnxt_re_build_reg_wqe()
2697 wqe->frmr.levels = qplib_frpl->hwq.level; in bnxt_re_build_reg_wqe()
3179 resp.tail = cq->qplib_cq.hwq.cons; in bnxt_re_create_cq()
3786 lib_qp->id, lib_qp->sq.hwq.prod, in send_phantom_wqe()
3787 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), in send_phantom_wqe()
3990 mr->qplib_mr.hwq.level = PBL_LVL_MAX; in bnxt_re_get_dma_mr()
/linux/drivers/scsi/fnic/
fnic_scsi.c
189 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) in free_wq_copy_descs() argument
192 if (!fnic->fw_ack_recd[hwq]) in free_wq_copy_descs()
199 if (wq->to_clean_index <= fnic->fw_ack_index[hwq]) in free_wq_copy_descs()
200 wq->ring.desc_avail += (fnic->fw_ack_index[hwq] in free_wq_copy_descs()
205 + fnic->fw_ack_index[hwq] + 1); in free_wq_copy_descs()
213 (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count; in free_wq_copy_descs()
216 fnic->fw_ack_recd[hwq] = 0; in free_wq_copy_descs()
361 uint16_t hwq) in fnic_queue_wq_copy_desc() argument
412 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_wq_copy_desc()
413 free_wq_copy_descs(fnic, wq, hwq); in fnic_queue_wq_copy_desc()
[all …]
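
free_wq_copy_descs() reclaims descriptors up to the firmware's last acknowledged index: when the clean pointer has not passed the ack index, the reclaimed span is the direct inclusive distance; otherwise the span wraps through the end of the ring. The same arithmetic as a standalone check (names shortened from the hit above):

#include <assert.h>

/* Inclusive count of slots from clean_idx through ack_idx in a
 * desc_count-entry ring, wrapping when clean_idx is ahead. */
static int reclaimed(int clean_idx, int ack_idx, int desc_count)
{
	if (clean_idx <= ack_idx)
		return ack_idx - clean_idx + 1;
	return desc_count - clean_idx + ack_idx + 1;
}

int main(void)
{
	assert(reclaimed(3, 7, 16) == 5);	/* slots 3..7 */
	assert(reclaimed(14, 1, 16) == 4);	/* slots 14, 15, 0, 1 (wrapped) */
	return 0;
}
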
fnic_main.c
586 int hwq; in fnic_free_ioreq_tables_mq() local
588 for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) in fnic_free_ioreq_tables_mq()
589 kfree(fnic->sw_copy_wq[hwq].io_req_table); in fnic_free_ioreq_tables_mq()
598 int hwq; in fnic_scsi_drv_init() local
619 for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { in fnic_scsi_drv_init()
620 fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; in fnic_scsi_drv_init()
621 fnic->sw_copy_wq[hwq].io_req_table = in fnic_scsi_drv_init()
622 kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * in fnic_scsi_drv_init()
625 if (!fnic->sw_copy_wq[hwq].io_req_table) { in fnic_scsi_drv_init()
fnic.h
551 int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
554 unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
/linux/include/scsi/
scsi_tcq.h
26 u16 hwq; in scsi_host_find_tag() local
31 hwq = blk_mq_unique_tag_to_hwq(tag); in scsi_host_find_tag()
32 if (hwq < shost->tag_set.nr_hw_queues) { in scsi_host_find_tag()
33 req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], in scsi_host_find_tag()
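
scsi_host_find_tag() depends on the block layer's unique-tag encoding: blk_mq_unique_tag() packs the hardware queue index into the upper 16 bits of a 32-bit value and the per-queue tag into the lower 16, which blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() unpack. A standalone illustration of that packing (the 16-bit split matches include/linux/blk-mq.h in this tree, but verify against your kernel version):

#include <assert.h>
#include <stdint.h>

#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1U << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
	uint32_t t = unique_tag(3, 0x2a);

	assert((t >> UNIQUE_TAG_BITS) == 3);	/* blk_mq_unique_tag_to_hwq() */
	assert((t & UNIQUE_TAG_MASK) == 0x2a);	/* blk_mq_unique_tag_to_tag() */
	return 0;
}
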
/linux/drivers/net/wireless/mediatek/mt76/mt7603/
dma.c
32 u8 qid, tid = 0, hwq = 0; in mt7603_rx_loopback_skb() local
56 hwq = wmm_queue_map[IEEE80211_AC_BE]; in mt7603_rx_loopback_skb()
61 hwq = wmm_queue_map[qid]; in mt7603_rx_loopback_skb()
65 hwq = wmm_queue_map[IEEE80211_AC_BE]; in mt7603_rx_loopback_skb()
72 hwq = MT_TX_HW_QUEUE_MGMT; in mt7603_rx_loopback_skb()
79 val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq); in mt7603_rx_loopback_skb()
/linux/drivers/net/wireless/ti/wlcore/
tx.c
1196 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); in wlcore_stop_queue_locked() local
1197 bool stopped = !!wl->queue_stop_reasons[hwq]; in wlcore_stop_queue_locked()
1200 WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); in wlcore_stop_queue_locked()
1205 ieee80211_stop_queue(wl->hw, hwq); in wlcore_stop_queue_locked()
1222 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); in wlcore_wake_queue() local
1227 WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); in wlcore_wake_queue()
1229 if (wl->queue_stop_reasons[hwq]) in wlcore_wake_queue()
1232 ieee80211_wake_queue(wl->hw, hwq); in wlcore_wake_queue()
1299 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); in wlcore_is_queue_stopped_by_reason_locked() local
1302 return test_bit(reason, &wl->queue_stop_reasons[hwq]); in wlcore_is_queue_stopped_by_reason_locked()
[all …]
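
The wlcore hits track why each queue is stopped, not just whether: every stop reason is one bit in queue_stop_reasons[hwq], the mac80211 queue is stopped when the first bit is set, and woken only when the last bit clears. A userspace sketch of that discipline, with plain bit operations standing in for the kernel's test_and_set_bit()/test_and_clear_bit() and hypothetical reason IDs:

#include <assert.h>
#include <stdbool.h>

enum { REASON_FLUSH, REASON_PS };	/* hypothetical reason IDs */

static unsigned long stop_reasons;	/* one queue's reason bitmap */

/* True when this call transitions the queue from running to stopped. */
static bool stop_queue(int reason)
{
	bool was_running = (stop_reasons == 0);

	stop_reasons |= 1UL << reason;
	return was_running;
}

/* True when the last pending reason clears and the queue may wake. */
static bool wake_queue(int reason)
{
	stop_reasons &= ~(1UL << reason);
	return stop_reasons == 0;
}

int main(void)
{
	assert(stop_queue(REASON_FLUSH));	/* first reason stops the queue */
	assert(!stop_queue(REASON_PS));		/* already stopped */
	assert(!wake_queue(REASON_FLUSH));	/* PS still pending: stay stopped */
	assert(wake_queue(REASON_PS));		/* last reason gone: wake */
	return 0;
}
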
/linux/drivers/scsi/
virtio_scsi.c
559 u16 hwq = blk_mq_unique_tag_to_hwq(tag); in virtscsi_pick_vq_mq() local
561 return &vscsi->req_vqs[hwq]; in virtscsi_pick_vq_mq()
773 static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) in virtscsi_commit_rqs() argument
777 virtscsi_kick_vq(&vscsi->req_vqs[hwq]); in virtscsi_commit_rqs()
/linux/drivers/ufs/host/
ufs-mediatek.c
1710 struct ufs_hw_queue *hwq; in ufs_mtk_mcq_intr() local
1714 hwq = &hba->uhq[qid]; in ufs_mtk_mcq_intr()
1721 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufs_mtk_mcq_intr()
ufs-qcom.c
1791 struct ufs_hw_queue *hwq = &hba->uhq[id]; in ufs_qcom_mcq_esi_handler() local
1794 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufs_qcom_mcq_esi_handler()
/linux/drivers/scsi/ibmvscsi/
ibmvfc.h
769 u16 hwq; member
ibmvfc.c
1602 evt->hwq = 0; in ibmvfc_init_event()
1971 u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq); in ibmvfc_queuecommand() local
1984 scsi_channel = hwq % vhost->scsi_scrqs.active_queues; in ibmvfc_queuecommand()
1989 evt->hwq = hwq % vhost->scsi_scrqs.active_queues; in ibmvfc_queuecommand()
/linux/include/ufs/
ufshcd.h
1329 struct ufs_hw_queue *hwq);
