/linux/drivers/infiniband/hw/bng_re/
bng_res.c:
    108  struct bng_re_hwq *hwq)                 in bng_re_free_hwq() [argument]
    112  if (!hwq->max_elements)                 in bng_re_free_hwq()
    114  if (hwq->level >= BNG_PBL_LVL_MAX)      in bng_re_free_hwq()
    117  for (i = 0; i < hwq->level + 1; i++)    in bng_re_free_hwq()
    118  bng_free_pbl(res, &hwq->pbl[i]);        in bng_re_free_hwq()
    120  hwq->level = BNG_PBL_LVL_MAX;           in bng_re_free_hwq()
    121  hwq->max_elements = 0;                  in bng_re_free_hwq()
    122  hwq->element_size = 0;                  in bng_re_free_hwq()
    123  hwq->prod = 0;                          in bng_re_free_hwq()
    124  hwq->cons = 0;                          in bng_re_free_hwq()
    [all …]

bng_res.h:
     16  #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))   [argument]
     17  #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \               [argument]
     18      ((HWQ_CMP(hwq->prod, hwq) \
     19      - HWQ_CMP(hwq->cons, hwq)) \
     20      & (hwq->max_elements - 1)))
     46  struct bng_re_hwq *hwq;                                          [member]
    141  static inline void *bng_re_get_qe(struct bng_re_hwq *hwq,    in bng_re_get_qe() [argument]
    146  pg_num = (indx / hwq->qe_ppg);                                in bng_re_get_qe()
    147  pg_idx = (indx % hwq->qe_ppg);                                in bng_re_get_qe()
    149  *pg = (u64)&hwq->pbl_ptr[pg_num];                             in bng_re_get_qe()
    [all …]

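Note: HWQ_CMP() only works because max_elements is a power of two, so masking with max_elements - 1 wraps a free-running index into the ring, and HWQ_FREE_SLOTS() subtracts the wrapped prod/cons distance from the ring size; bng_re_get_qe() then splits the wrapped index into a page number and a slot within that page via qe_ppg (queue entries per page). A minimal user-space sketch of the same arithmetic; struct toy_hwq and all names below are illustrative, not the driver's types:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's hardware-queue bookkeeping. */
struct toy_hwq {
	uint32_t max_elements;	/* must be a power of two */
	uint32_t prod, cons;	/* free-running indices */
	uint32_t qe_ppg;	/* queue entries per page */
};

/* HWQ_CMP() idea: wrap a free-running index into the ring. */
static uint32_t hwq_cmp(uint32_t idx, const struct toy_hwq *q)
{
	return idx & (q->max_elements - 1);
}

/* HWQ_FREE_SLOTS() idea: ring size minus in-flight entries. */
static uint32_t hwq_free_slots(const struct toy_hwq *q)
{
	uint32_t used = (hwq_cmp(q->prod, q) - hwq_cmp(q->cons, q))
			& (q->max_elements - 1);
	return q->max_elements - used;
}

int main(void)
{
	struct toy_hwq q = { .max_elements = 256, .prod = 300,
			     .cons = 260, .qe_ppg = 64 };
	uint32_t idx = hwq_cmp(q.prod, &q);	/* 300 & 255 = 44 */

	printf("free slots: %u\n", hwq_free_slots(&q));	/* 256 - 40 = 216 */
	/* bng_re_get_qe() idea: page number and slot within the page. */
	printf("page %u, slot %u\n", idx / q.qe_ppg, idx % q.qe_ppg);
	return 0;
}
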
bng_fw.c:
     56  bng_re_free_hwq(rcfw->res, &rcfw->cmdq.hwq);           in bng_re_free_rcfw_channel()
     57  bng_re_free_hwq(rcfw->res, &rcfw->creq.hwq);           in bng_re_free_rcfw_channel()
     83  if (bng_re_alloc_init_hwq(&creq->hwq, &hwq_attr)) {    in bng_re_alloc_fw_channel()
     95  if (bng_re_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {    in bng_re_alloc_fw_channel()
    101  rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,      in bng_re_alloc_fw_channel()
    120  struct bng_re_hwq *hwq = &rcfw->cmdq.hwq;              in bng_re_process_qp_event() [local]
    144  spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);    in bng_re_process_qp_event()
    155  spin_unlock(&hwq->lock);                               in bng_re_process_qp_event()
    181  hwq->cons += req_size;                                 in bng_re_process_qp_event()
    183  spin_unlock(&hwq->lock);                               in bng_re_process_qp_event()
    [all …]

bng_fw.h:
     69  struct bng_re_hwq hwq;    [member]
     90  struct bng_re_hwq hwq;    [member]

bng_re.h:
     31  struct bng_re_hwq hwq;    [member]

bng_dev.c:
    354  rattr.dma_arr = creq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr;   in bng_re_dev_init()
    355  rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;      in bng_re_dev_init()

/linux/drivers/ufs/core/
ufs-mcq.c:
    225  struct ufs_hw_queue *hwq;                                         in ufshcd_mcq_memory_alloc() [local]
    230  hwq = &hba->uhq[i];                                               in ufshcd_mcq_memory_alloc()
    233  hwq->max_entries;                                                 in ufshcd_mcq_memory_alloc()
    234  hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,    in ufshcd_mcq_memory_alloc()
    235  &hwq->sqe_dma_addr,                                               in ufshcd_mcq_memory_alloc()
    237  if (!hwq->sqe_base_addr) {                                        in ufshcd_mcq_memory_alloc()
    242  cqe_size = sizeof(struct cq_entry) * hwq->max_entries;            in ufshcd_mcq_memory_alloc()
    243  hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,      in ufshcd_mcq_memory_alloc()
    244  &hwq->cqe_dma_addr,                                               in ufshcd_mcq_memory_alloc()
    246  if (!hwq->cqe_base_addr) {                                        in ufshcd_mcq_memory_alloc()
    [all …]

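ufshcd_mcq_memory_alloc() gives every hardware queue its own submission (SQE) and completion (CQE) ring in coherent DMA memory, sized by max_entries. A minimal sketch of that per-queue allocation pattern; struct toy_hw_queue and toy_alloc_queue() are illustrative stand-ins, only dmam_alloc_coherent() is the real API:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative per-queue ring state; not the UFS driver's real type. */
struct toy_hw_queue {
	u32 max_entries;
	void *sqe_base_addr;
	dma_addr_t sqe_dma_addr;
	void *cqe_base_addr;
	dma_addr_t cqe_dma_addr;
};

static int toy_alloc_queue(struct device *dev, struct toy_hw_queue *hwq,
			   size_t sqe_size, size_t cqe_size)
{
	/* dmam_* allocations are released automatically on device detach. */
	hwq->sqe_base_addr = dmam_alloc_coherent(dev,
						 sqe_size * hwq->max_entries,
						 &hwq->sqe_dma_addr,
						 GFP_KERNEL);
	if (!hwq->sqe_base_addr)
		return -ENOMEM;

	hwq->cqe_base_addr = dmam_alloc_coherent(dev,
						 cqe_size * hwq->max_entries,
						 &hwq->cqe_dma_addr,
						 GFP_KERNEL);
	if (!hwq->cqe_base_addr)
		return -ENOMEM;

	return 0;
}
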
ufshcd.c:
     517  struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);   in ufshcd_add_command_trace() [local]
     519  hwq_id = hwq->id;                                            in ufshcd_add_command_trace()
    2366  struct ufs_hw_queue *hwq)                                    in ufshcd_send_command() [argument]
    2390  spin_lock(&hwq->sq_lock);                                    in ufshcd_send_command()
    2391  dest = hwq->sqe_base_addr + hwq->sq_tail_slot;               in ufshcd_send_command()
    2393  ufshcd_inc_sq_tail(hwq);                                     in ufshcd_send_command()
    2394  spin_unlock(&hwq->sq_lock);                                  in ufshcd_send_command()
    3039  struct ufs_hw_queue *hwq = NULL;                             in ufshcd_queuecommand() [local]
    3098  hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));       in ufshcd_queuecommand()
    3100  ufshcd_send_command(hba, cmd, hwq);                          in ufshcd_queuecommand()
    [all …]

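ufshcd_send_command() shows the MCQ submission fast path: take the queue's sq_lock, copy the SQE into the slot at the current tail, advance the tail (the doorbell write lives in ufshcd_inc_sq_tail()), and unlock. A minimal sketch of the pattern with illustrative types; the real driver's sqe_base_addr is a typed pointer, so its pointer arithmetic scales by entry size automatically:

#include <linux/spinlock.h>
#include <linux/string.h>

/* Illustrative submission-queue state; not the UFS driver's real type. */
struct toy_sq {
	spinlock_t sq_lock;
	void *sqe_base_addr;	/* ring of fixed-size entries */
	u32 sq_tail_slot;	/* next free slot */
	u32 max_entries;
	size_t sqe_size;
};

static void toy_post_sqe(struct toy_sq *sq, const void *sqe)
{
	void *dest;

	spin_lock(&sq->sq_lock);
	dest = sq->sqe_base_addr + sq->sq_tail_slot * sq->sqe_size;
	memcpy(dest, sqe, sq->sqe_size);

	/* Advance the tail; a real driver would also ring the doorbell. */
	if (++sq->sq_tail_slot == sq->max_entries)
		sq->sq_tail_slot = 0;
	spin_unlock(&sq->sq_lock);
}
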
ufshcd-priv.h:
     75  struct ufs_hw_queue *hwq);

/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c:
     80  dev_dbg(&scq->hwq.pdev->dev,                            in __bnxt_qplib_add_flush_qp()
     88  dev_dbg(&rcq->hwq.pdev->dev,                            in __bnxt_qplib_add_flush_qp()
    147  qp->sq.hwq.prod = 0;                                    in bnxt_qplib_clean_qp()
    148  qp->sq.hwq.cons = 0;                                    in bnxt_qplib_clean_qp()
    150  qp->rq.hwq.prod = 0;                                    in bnxt_qplib_clean_qp()
    151  qp->rq.hwq.cons = 0;                                    in bnxt_qplib_clean_qp()
    241  struct bnxt_qplib_hwq *hwq = &nq->hwq;                  in clean_nq() [local]
    247  spin_lock_bh(&hwq->lock);                               in clean_nq()
    250  nq_ptr = (struct nq_base **)hwq->pbl_ptr;               in clean_nq()
    251  nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];   in clean_nq()
    [all …]

qplib_res.c:
    151  struct bnxt_qplib_hwq *hwq)                     in bnxt_qplib_free_hwq() [argument]
    155  if (!hwq->max_elements)                         in bnxt_qplib_free_hwq()
    157  if (hwq->level >= PBL_LVL_MAX)                  in bnxt_qplib_free_hwq()
    160  for (i = 0; i < hwq->level + 1; i++) {          in bnxt_qplib_free_hwq()
    161  if (i == hwq->level)                            in bnxt_qplib_free_hwq()
    162  __free_pbl(res, &hwq->pbl[i], hwq->is_user);    in bnxt_qplib_free_hwq()
    164  __free_pbl(res, &hwq->pbl[i], false);           in bnxt_qplib_free_hwq()
    167  hwq->level = PBL_LVL_MAX;                       in bnxt_qplib_free_hwq()
    168  hwq->max_elements = 0;                          in bnxt_qplib_free_hwq()
    169  hwq->element_size = 0;                          in bnxt_qplib_free_hwq()
    [all …]

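Both the bng_re teardown above and bnxt_qplib_free_hwq() here walk every PBL (page buffer list) level from 0 through hwq->level and free each one; only the leaf level can reference user-mapped pages, which is why the bnxt variant passes hwq->is_user for the last level only and false for the intermediate levels. A rough sketch of that walk; the toy_* types and helper are illustrative, not the driver's API:

#include <linux/types.h>

/* Illustrative multi-level page-list bookkeeping. */
enum { TOY_PBL_LVL_0, TOY_PBL_LVL_1, TOY_PBL_LVL_2, TOY_PBL_LVL_MAX };

struct toy_pbl {
	void **pg_arr;		/* pages (or page pointers) for one level */
};

struct toy_hwq {
	int level;		/* deepest valid PBL level */
	bool is_user;		/* leaf pages mapped from user space? */
	struct toy_pbl pbl[TOY_PBL_LVL_MAX];
};

static void toy_free_pbl(struct toy_pbl *pbl, bool is_user)
{
	/* release pg_arr; user-mapped pages need different treatment */
}

static void toy_free_hwq(struct toy_hwq *hwq)
{
	int i;

	if (hwq->level >= TOY_PBL_LVL_MAX)	/* already torn down */
		return;

	for (i = 0; i <= hwq->level; i++)
		/* Only the leaf level can hold user memory. */
		toy_free_pbl(&hwq->pbl[i], i == hwq->level && hwq->is_user);

	hwq->level = TOY_PBL_LVL_MAX;		/* mark as freed */
}
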
qplib_rcfw.c:
    239  struct bnxt_qplib_hwq *hwq = &cmdq->hwq;           in __send_message_no_waiter() [local]
    264  sw_prod = HWQ_CMP(hwq->prod, hwq);                 in __send_message_no_waiter()
    265  cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);     in __send_message_no_waiter()
    271  hwq->prod++;                                       in __send_message_no_waiter()
    275  cmdq_prod = hwq->prod;                             in __send_message_no_waiter()
    290  struct bnxt_qplib_hwq *hwq;                        in __send_message() [local]
    297  hwq = &cmdq->hwq;                                  in __send_message()
    303  spin_lock_bh(&hwq->lock);                          in __send_message()
    305  free_slots = HWQ_FREE_SLOTS(hwq);                  in __send_message()
    313  spin_unlock_bh(&hwq->lock);                        in __send_message()
    [all …]

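The RCFW command path is the producer half of the ring shown in bng_res.h: under the queue lock it checks HWQ_FREE_SLOTS(), wraps the free-running prod index to locate the slot, copies the command in, and bumps prod before ringing the doorbell. A condensed sketch under the same power-of-two assumption; all names are illustrative:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Illustrative command-queue state; not the driver's real type. */
struct toy_cmdq {
	spinlock_t lock;
	u32 prod, cons;		/* free-running indices */
	u32 max_elements;	/* power of two */
	void *slots;		/* ring of slot_size-byte entries */
	size_t slot_size;
};

/* Post one command; returns -EAGAIN when the ring is full. */
static int toy_send_cmd(struct toy_cmdq *q, const void *cmd)
{
	u32 sw_prod;
	int rc = 0;

	spin_lock_bh(&q->lock);
	/*
	 * With free-running 32-bit indices, prod - cons is the exact
	 * occupancy (this sidesteps the full-vs-empty ambiguity a purely
	 * masked comparison would have).
	 */
	if (q->prod - q->cons == q->max_elements) {
		rc = -EAGAIN;
		goto out;
	}

	sw_prod = q->prod & (q->max_elements - 1);	/* HWQ_CMP() idea */
	memcpy(q->slots + sw_prod * q->slot_size, cmd, q->slot_size);
	q->prod++;	/* the real driver now writes prod to the doorbell */
out:
	spin_unlock_bh(&q->lock);
	return rc;
}
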
qplib_fp.h:
    101  struct bnxt_qplib_hwq hwq;                                              [member]
    249  struct bnxt_qplib_hwq hwq;                                              [member]
    366  static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)    in __bnxt_qplib_get_avail() [argument]
    370  cons = hwq->cons;                                                       in __bnxt_qplib_get_avail()
    371  prod = hwq->prod;                                                       in __bnxt_qplib_get_avail()
    374  avail += hwq->depth;                                                    in __bnxt_qplib_get_avail()
    381  struct bnxt_qplib_hwq *hwq;                                             in bnxt_qplib_queue_full() [local]
    384  hwq = &que->hwq;                                                        in bnxt_qplib_queue_full()
    386  avail = hwq->cons - hwq->prod;                                          in bnxt_qplib_queue_full()
    387  if (hwq->cons <= hwq->prod)                                             in bnxt_qplib_queue_full()
    [all …]

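__bnxt_qplib_get_avail() handles rings whose depth need not be a power of two: it computes cons - prod as a signed value and adds the depth back when prod has wrapped ahead of cons (with cons == prod counting as an empty queue). The same arithmetic as a worked sketch, with illustrative names:

/* Free slots for a ring whose depth need not be a power of two. */
static int toy_get_avail(int cons, int prod, int depth)
{
	int avail = cons - prod;

	if (cons <= prod)	/* wrapped (or empty): correct by one turn */
		avail += depth;
	return avail;
}

/*
 * Example, depth = 100:
 *   prod = 95, cons = 10  ->  10 - 95 = -85, avail = -85 + 100 = 15
 *   prod = cons = 40      ->  0 + 100 = 100 (queue empty)
 */
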
qplib_sp.c:
    506  if (mrw->hwq.max_elements)                                       in bnxt_qplib_free_mrw()
    507  bnxt_qplib_free_hwq(res, &mrw->hwq);                             in bnxt_qplib_free_mrw()
    571  if (mrw->hwq.max_elements) {                                     in bnxt_qplib_dereg_mrw()
    574  bnxt_qplib_free_hwq(res, &mrw->hwq);                             in bnxt_qplib_dereg_mrw()
    599  if (mr->hwq.max_elements)                                        in bnxt_qplib_reg_mr()
    600  bnxt_qplib_free_hwq(res, &mr->hwq);                              in bnxt_qplib_reg_mr()
    610  rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);             in bnxt_qplib_reg_mr()
    623  if (mr->hwq.level == PBL_LVL_MAX) {                              in bnxt_qplib_reg_mr()
    629  level = mr->hwq.level;                                           in bnxt_qplib_reg_mr()
    630  req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);     in bnxt_qplib_reg_mr()
    [all …]

qplib_sp.h:
    124  struct bnxt_qplib_hwq hwq;    [member]
    129  struct bnxt_qplib_hwq hwq;    [member]

main.c:
    1087  mr_hwq = &mr->qplib_mr.hwq;                                         in bnxt_re_fill_res_mr_entry()
    1145  cq_hwq = &cq->qplib_cq.hwq;                                         in bnxt_re_fill_res_cq_entry()
    1845  nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;                      in bnxt_re_alloc_res()
    1853  rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;                  in bnxt_re_alloc_res()
    1854  rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count;     in bnxt_re_alloc_res()
    2249  rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;                in bnxt_re_dev_init()
    2250  rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;              in bnxt_re_dev_init()

ib_verbs.c:
    2732  wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];    in bnxt_re_build_reg_wqe()
    2733  wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];      in bnxt_re_build_reg_wqe()
    2736  wqe->frmr.levels = qplib_frpl->hwq.level;                    in bnxt_re_build_reg_wqe()
    3212  resp.tail = cq->qplib_cq.hwq.cons;                           in bnxt_re_create_cq()
    3819  lib_qp->id, lib_qp->sq.hwq.prod,                             in send_phantom_wqe()
    3820  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),               in send_phantom_wqe()
    4023  mr->qplib_mr.hwq.level = PBL_LVL_MAX;                        in bnxt_re_get_dma_mr()

/linux/drivers/scsi/fnic/
fnic_scsi.c:
    189  static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)   in free_wq_copy_descs() [argument]
    192  if (!fnic->fw_ack_recd[hwq])                                       in free_wq_copy_descs()
    199  if (wq->to_clean_index <= fnic->fw_ack_index[hwq])                 in free_wq_copy_descs()
    200  wq->ring.desc_avail += (fnic->fw_ack_index[hwq]                    in free_wq_copy_descs()
    205  + fnic->fw_ack_index[hwq] + 1);                                    in free_wq_copy_descs()
    213  (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;               in free_wq_copy_descs()
    216  fnic->fw_ack_recd[hwq] = 0;                                        in free_wq_copy_descs()
    361  uint16_t hwq)                                                      in fnic_queue_wq_copy_desc() [argument]
    412  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])    in fnic_queue_wq_copy_desc()
    413  free_wq_copy_descs(fnic, wq, hwq);                                 in fnic_queue_wq_copy_desc()
    [all …]

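free_wq_copy_descs() turns the firmware's last acknowledged descriptor index into a reclaim count: if fw_ack_index is at or ahead of to_clean_index, the count is the inclusive difference; otherwise the window wraps and the ring size is added. to_clean_index then advances to (fw_ack_index + 1) % desc_count. A worked sketch of the two cases, with illustrative names:

/*
 * Descriptors reclaimable between to_clean and the firmware's last ack,
 * inclusive. Mirrors the free_wq_copy_descs() arithmetic.
 */
static unsigned int toy_reclaimed(unsigned int to_clean, unsigned int fw_ack,
				  unsigned int desc_count)
{
	if (to_clean <= fw_ack)
		return fw_ack - to_clean + 1;		/* no wrap */
	return desc_count - to_clean + fw_ack + 1;	/* wrapped */
}

/*
 * Example, desc_count = 64:
 *   to_clean = 10, fw_ack = 20  ->  11 descriptors reclaimed
 *   to_clean = 60, fw_ack = 3   ->  64 - 60 + 3 + 1 = 8 reclaimed
 * Either way, to_clean then becomes (fw_ack + 1) % desc_count.
 */
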
fnic_main.c:
    586  int hwq;                                                            in fnic_free_ioreq_tables_mq() [local]
    588  for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)                     in fnic_free_ioreq_tables_mq()
    589  kfree(fnic->sw_copy_wq[hwq].io_req_table);                          in fnic_free_ioreq_tables_mq()
    598  int hwq;                                                            in fnic_scsi_drv_init() [local]
    619  for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {                   in fnic_scsi_drv_init()
    620  fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;     in fnic_scsi_drv_init()
    621  fnic->sw_copy_wq[hwq].io_req_table =                                in fnic_scsi_drv_init()
    622  kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *              in fnic_scsi_drv_init()
    625  if (!fnic->sw_copy_wq[hwq].io_req_table) {                          in fnic_scsi_drv_init()

fnic.h:
    549  int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
    552  unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,

/linux/include/scsi/
scsi_tcq.h:
     26  u16 hwq;                                             in scsi_host_find_tag() [local]
     31  hwq = blk_mq_unique_tag_to_hwq(tag);                 in scsi_host_find_tag()
     32  if (hwq < shost->tag_set.nr_hw_queues) {             in scsi_host_find_tag()
     33  req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],     in scsi_host_find_tag()

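scsi_host_find_tag() (and the virtio_scsi and nbd hits below) all decode a blk-mq "unique tag", which packs the hardware-queue number into the upper 16 bits and the per-queue tag into the lower 16 (blk-mq's BLK_MQ_UNIQUE_TAG_BITS = 16 split). A self-contained sketch of the encode/decode round trip; make_unique_tag() is an illustrative stand-in for blk_mq_unique_tag():

#include <stdint.h>
#include <stdio.h>

/* Same split blk-mq uses: hwq in the upper 16 bits, tag in the lower 16. */
#define UNIQUE_TAG_BITS	16
#define UNIQUE_TAG_MASK	((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
	uint32_t unique = make_unique_tag(3, 0x2a);

	/* Decode as blk_mq_unique_tag_to_hwq()/_to_tag() do. */
	printf("hwq %u, tag 0x%x\n",
	       unique >> UNIQUE_TAG_BITS, unique & UNIQUE_TAG_MASK);
	return 0;
}
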
/linux/drivers/net/wireless/ti/wlcore/
tx.c:
    1200  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);                      in wlcore_stop_queue_locked() [local]
    1201  bool stopped = !!wl->queue_stop_reasons[hwq];                              in wlcore_stop_queue_locked()
    1204  WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));      in wlcore_stop_queue_locked()
    1209  ieee80211_stop_queue(wl->hw, hwq);                                         in wlcore_stop_queue_locked()
    1226  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);                      in wlcore_wake_queue() [local]
    1231  WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));   in wlcore_wake_queue()
    1233  if (wl->queue_stop_reasons[hwq])                                           in wlcore_wake_queue()
    1236  ieee80211_wake_queue(wl->hw, hwq);                                         in wlcore_wake_queue()
    1303  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);                      in wlcore_is_queue_stopped_by_reason_locked() [local]
    1306  return test_bit(reason, &wl->queue_stop_reasons[hwq]);                     in wlcore_is_queue_stopped_by_reason_locked()
    [all …]

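wlcore keeps a per-hardware-queue bitmask of reasons a queue is stopped: mac80211 is told to stop the queue when the first reason bit goes in, and to wake it only when the last one is cleared; the WARN_ON_ONCE() calls catch double stop/wake for the same reason. A minimal sketch of that pattern; the toy_* names stand in for wlcore's types and the ieee80211_*_queue() calls:

#include <linux/bitops.h>
#include <linux/bug.h>

/* Stand-ins for ieee80211_stop_queue()/ieee80211_wake_queue(). */
static void toy_mac_stop(int hwq) { }
static void toy_mac_wake(int hwq) { }

struct toy_wl {
	unsigned long queue_stop_reasons[4];	/* one reason bitmask per hwq */
};

static void toy_stop_queue(struct toy_wl *wl, int hwq, int reason)
{
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* Stopping twice for the same reason indicates a driver bug. */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
	if (!stopped)
		toy_mac_stop(hwq);	/* first reason stops the queue */
}

static void toy_wake_queue(struct toy_wl *wl, int hwq, int reason)
{
	WARN_ON_ONCE(!test_and_clear_bit(reason,
					 &wl->queue_stop_reasons[hwq]));
	if (!wl->queue_stop_reasons[hwq])
		toy_mac_wake(hwq);	/* last reason cleared wakes it */
}
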
/linux/drivers/scsi/
virtio_scsi.c:
    559  u16 hwq = blk_mq_unique_tag_to_hwq(tag);                            in virtscsi_pick_vq_mq() [local]
    561  return &vscsi->req_vqs[hwq];                                        in virtscsi_pick_vq_mq()
    773  static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)   in virtscsi_commit_rqs() [argument]
    777  virtscsi_kick_vq(&vscsi->req_vqs[hwq]);                             in virtscsi_commit_rqs()

/linux/drivers/net/wireless/intel/iwlegacy/
common.h:
    2257  il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)    in il_set_swq_id() [argument]
    2260  BUG_ON(hwq > 31); /* only use 5 bits */                   in il_set_swq_id()
    2262  txq->swq_id = (hwq << 2) | ac;                            in il_set_swq_id()
    2283  u8 hwq = (queue >> 2) & 0x1f;                             in il_wake_queue() [local]
    2285  if (test_and_clear_bit(hwq, il->queue_stopped))           in il_wake_queue()
    2294  u8 hwq = (queue >> 2) & 0x1f;                             in il_stop_queue() [local]
    2296  if (!test_and_set_bit(hwq, il->queue_stopped))            in il_stop_queue()

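il_set_swq_id() packs two fields into one byte: the 2-bit access class in bits 0-1 and the 5-bit hardware queue number in bits 2-6, so the decode on the wake/stop side is (queue >> 2) & 0x1f. A self-contained round trip of that packing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 5-bit hw queue and 2-bit access class as il_set_swq_id() does. */
static uint8_t set_swq_id(uint8_t hwq, uint8_t ac)
{
	assert(hwq <= 31 && ac <= 3);	/* 5 + 2 bits */
	return (hwq << 2) | ac;
}

int main(void)
{
	uint8_t swq_id = set_swq_id(17, 2);	/* (17 << 2) | 2 = 70 */

	/* Decode as il_wake_queue()/il_stop_queue() do. */
	printf("hwq %u, ac %u\n", (swq_id >> 2) & 0x1f, swq_id & 3);
	return 0;
}
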
/linux/drivers/block/
nbd.c:
    879  u16 hwq;                                           in nbd_handle_reply() [local]
    885  hwq = blk_mq_unique_tag_to_hwq(tag);               in nbd_handle_reply()
    886  if (hwq < nbd->tag_set.nr_hw_queues)               in nbd_handle_reply()
    887  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],     in nbd_handle_reply()