/linux/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
  in nvkm_falcon_cmdq_has_room():
    26: nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)  [argument]
    28:     u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
    29:     u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
    35:     free = cmdq->offset + cmdq->size - head;
    40:     head = cmdq->offset;
  in nvkm_falcon_cmdq_push():
    51: nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
    53:     struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
    54:     nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false);
    55:     cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
  in nvkm_falcon_cmdq_rewind():
    59: nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
  [all …]
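
The cmdq.c hits outline a no-wrap ring: nvkm_falcon_cmdq_has_room() computes the space left between the write pointer and the end of the DMEM region and, when a command would not fit contiguously, signals a rewind back to the queue start. A minimal plain-C sketch of that check; the struct layout and names here are illustrative, not nouveau's actual types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cmdq {
        uint32_t offset;  /* start of the queue region in DMEM */
        uint32_t size;    /* total queue size in bytes */
        uint32_t head;    /* write pointer (a register on the real part) */
        uint32_t tail;    /* read pointer (a register on the real part) */
    };

    static bool cmdq_has_room(struct cmdq *q, uint32_t size, bool *rewind)
    {
        uint32_t head = q->head, tail = q->tail, free;

        *rewind = false;
        if (head >= tail) {
            /* free space runs from head to the end of the buffer */
            free = q->offset + q->size - head;
            if (free < size) {
                /* commands must be contiguous: wrap to the start */
                *rewind = true;
                head = q->offset;
                free = tail - head - 1;  /* keep one byte open */
            }
        } else {
            free = tail - head - 1;
        }
        return free >= size;
    }

    int main(void)
    {
        struct cmdq q = { .offset = 0, .size = 64, .head = 60, .tail = 16 };
        bool rewind;

        printf("room for 8 bytes: %d (rewind=%d)\n",
               cmdq_has_room(&q, 8, &rewind), rewind);
        return 0;
    }

Keeping one byte permanently open is what lets the check tell a full ring apart from an empty one.
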
/linux/drivers/net/ethernet/brocade/bna/bfa_msgq.c
    31: static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
    32: static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
    43: bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
    44: bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
    45: bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
    46: bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
  in cmdq_sm_stopped_entry():
    50: cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)  [argument]
    54:     cmdq->producer_index = 0;
    55:     cmdq->consumer_index = 0;
    56:     cmdq->flags = 0;
  [all …]
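
bfa_msgq drives its command queue through states declared with bfa_fsm_state_decl(), and the stopped state's entry hook resets both ring indices. A compact sketch of that entry-action pattern, with hypothetical names standing in for the bfa FSM machinery:

    #include <stdio.h>

    enum cmdq_state { CMDQ_STOPPED, CMDQ_INIT_WAIT, CMDQ_READY, CMDQ_DBELL_WAIT };

    struct msgq_cmdq {
        enum cmdq_state state;
        unsigned int producer_index, consumer_index, flags;
    };

    /* entry action: a stopped queue forgets its ring position */
    static void cmdq_sm_stopped_entry(struct msgq_cmdq *q)
    {
        q->producer_index = 0;
        q->consumer_index = 0;
        q->flags = 0;
    }

    static void cmdq_set_state(struct msgq_cmdq *q, enum cmdq_state next)
    {
        q->state = next;
        if (next == CMDQ_STOPPED)  /* run the new state's entry action */
            cmdq_sm_stopped_entry(q);
    }

    int main(void)
    {
        struct msgq_cmdq q = { .state = CMDQ_READY, .producer_index = 42 };

        cmdq_set_state(&q, CMDQ_STOPPED);
        printf("pi=%u ci=%u flags=%u\n",
               q.producer_index, q.consumer_index, q.flags);
        return 0;
    }
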
/linux/drivers/accel/ivpu/ivpu_job.c
  in ivpu_cmdq_ring_db():
    29: static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)  [argument]
    31:     ivpu_hw_db_set(vdev, cmdq->db_id);
  in ivpu_preemption_buffers_create():
    35:     struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)  [argument]
    44:     cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
    46:     if (!cmdq->primary_preempt_buf) {
    51:     cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
    53:     if (!cmdq->secondary_preempt_buf) {
    61:     ivpu_bo_free(cmdq->primary_preempt_buf);
    62:     cmdq->primary_preempt_buf = NULL;
  in ivpu_preemption_buffers_free():
    67:     struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)  [argument]
  [all …]
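
ivpu_preemption_buffers_create() allocates a primary and a secondary preemption buffer and, if the second allocation fails, frees and NULLs the first before returning, so the caller never sees a half-built state. A sketch of that unwind pattern, with malloc()/free() standing in for ivpu_bo_create()/ivpu_bo_free():

    #include <stdlib.h>

    struct cmdq_bufs {
        void *primary_preempt_buf;
        void *secondary_preempt_buf;
    };

    static int preempt_bufs_create(struct cmdq_bufs *c, size_t pri, size_t sec)
    {
        c->primary_preempt_buf = malloc(pri);   /* stand-in for ivpu_bo_create() */
        if (!c->primary_preempt_buf)
            return -1;

        c->secondary_preempt_buf = malloc(sec);
        if (!c->secondary_preempt_buf) {
            free(c->primary_preempt_buf);       /* unwind the first allocation */
            c->primary_preempt_buf = NULL;      /* leave a clean failure state */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct cmdq_bufs b;
        return preempt_bufs_create(&b, 4096, 4096);
    }
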
/linux/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
    78: #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \  [argument]
    79:                             struct hinic_cmdqs, cmdq[0])
  in cmdq_set_db():
   320: static void cmdq_set_db(struct hinic_cmdq *cmdq,  [argument]
   332:     writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
  in cmdq_sync_cmd_direct_resp():
   335: static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,  [argument]
   343:     struct hinic_wq *wq = cmdq->wq;
   348:     spin_lock_bh(&cmdq->cmdq_lock);
   353:     spin_unlock_bh(&cmdq->cmdq_lock);
   359:     wrapped = cmdq->wrapped;
   364:     cmdq->wrapped = !cmdq->wrapped;
  [all …]
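
The cmdq_sync_cmd_direct_resp() hits show hinic's wrap toggle: under cmdq_lock, the producer records the current `wrapped` bit into each work-queue entry and inverts the bit whenever the producer index passes the end of the ring, so the consumer can tell a freshly written descriptor from a stale one at the same index. A minimal sketch with hypothetical field names (locking elided):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cmdq {
        uint16_t prod_idx;   /* next WQE slot to hand out */
        uint16_t depth;      /* WQEs in the ring */
        bool wrapped;        /* inverted on every lap */
    };

    /* hand out the next producer slot and its ownership bit */
    static uint16_t cmdq_get_wqe(struct cmdq *q, bool *wrapped)
    {
        uint16_t pi = q->prod_idx;

        *wrapped = q->wrapped;
        if (++q->prod_idx == q->depth) {
            q->prod_idx = 0;
            q->wrapped = !q->wrapped;  /* the next lap uses the inverted bit */
        }
        return pi;
    }

    int main(void)
    {
        struct cmdq q = { .prod_idx = 0, .depth = 4, .wrapped = false };
        bool w;

        for (int i = 0; i < 6; i++) {
            uint16_t pi = cmdq_get_wqe(&q, &w);
            printf("wqe %u wrapped=%d\n", pi, w);
        }
        return 0;
    }
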
/linux/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
   119:     dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  [in write_sq_ctxts()]
   163:     dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  [in write_rq_ctxts()]
   220:     dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  [in hinic_clean_queue_offload_ctxt()]
  in hinic_io_init():
   533:     enum hinic_cmdq_type cmdq, type;  [local]
   565:     for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
   568:         dev_err(&pdev->dev, "Failed to get cmdq db area\n");
   573:         func_to_io->cmdq_db_area[cmdq] = db_area;
   600:     for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
  in hinic_io_free():
   619:     enum hinic_cmdq_type cmdq;  [local]
   628:     for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
  [all …]
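
hinic_io_init() acquires one doorbell area per command-queue type and, on failure partway through, releases only the areas obtained so far (the `type < cmdq` loop). A sketch of that partial-unwind idiom, with stub helpers in place of the real doorbell calls:

    enum cmdq_type { CMDQ_SYNC, CMDQ_ASYNC, MAX_CMDQ_TYPES };

    /* stubs for the doorbell-area helpers; the second type fails on purpose */
    static int get_db_area(enum cmdq_type t) { return t == CMDQ_ASYNC ? -1 : 0; }
    static void return_db_area(enum cmdq_type t) { (void)t; }

    static int io_init(void)
    {
        enum cmdq_type cmdq, type;

        for (cmdq = CMDQ_SYNC; cmdq < MAX_CMDQ_TYPES; cmdq++) {
            if (get_db_area(cmdq) != 0)
                goto err;
        }
        return 0;

    err:
        /* tear down only what was set up before the failure */
        for (type = CMDQ_SYNC; type < cmdq; type++)
            return_db_area(type);
        return -1;
    }

    int main(void)
    {
        return io_init() == -1 ? 0 : 1;  /* expect the staged failure */
    }
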
/linux/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
   162:  * cmdq_allocate_page - allocate page for cmdq
   163:  * @cmdq_pages: the pages of the cmdq queue struct to hold the page
   176:  * cmdq_free_page - free page from cmdq
   177:  * @cmdq_pages: the pages of the cmdq queue struct that hold the page
   588:  * @cmdq_pages: will hold the pages of the cmdq
   591:  * @cmdq_blocks: number of cmdq blocks/wq to allocate
   638:     dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");  [in hinic_wqs_cmdq_alloc()]
   662:     dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");  [in hinic_wqs_cmdq_alloc()]
   684:  * @cmdq_pages: hold the pages of the cmdq
/linux/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
  in backlog_list_add():
   230:     struct nitrox_cmdq *cmdq)  [argument]
   234:     spin_lock_bh(&cmdq->backlog_qlock);
   235:     list_add_tail(&sr->backlog, &cmdq->backlog_head);
   236:     atomic_inc(&cmdq->backlog_count);
   238:     spin_unlock_bh(&cmdq->backlog_qlock);
  in response_list_add():
   242:     struct nitrox_cmdq *cmdq)  [argument]
   246:     spin_lock_bh(&cmdq->resp_qlock);
   247:     list_add_tail(&sr->response, &cmdq->response_head);
   248:     spin_unlock_bh(&cmdq->resp_qlock);
  in response_list_del():
   252:     struct nitrox_cmdq *cmdq)
  [all …]
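
nitrox_reqmgr.c keeps separate locks for each command queue's backlog and response lists, plus an atomic backlog counter that readers can poll without taking the lock. A userspace sketch of the same split, with pthread mutexes standing in for spin_lock_bh() and illustrative type names:

    #include <pthread.h>
    #include <stdatomic.h>

    struct list_node { struct list_node *prev, *next; };

    struct cmdq {
        pthread_mutex_t backlog_qlock;   /* protects backlog_head only */
        pthread_mutex_t resp_qlock;      /* protects response_head only */
        struct list_node backlog_head;
        struct list_node response_head;
        atomic_int backlog_count;        /* readable without the lock */
    };

    static void list_add_tail(struct list_node *n, struct list_node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    static void backlog_list_add(struct cmdq *q, struct list_node *sr)
    {
        pthread_mutex_lock(&q->backlog_qlock);
        list_add_tail(sr, &q->backlog_head);
        atomic_fetch_add(&q->backlog_count, 1);
        pthread_mutex_unlock(&q->backlog_qlock);
    }

    int main(void)
    {
        static struct cmdq q;   /* static: zero-initialized */
        struct list_node sr;

        pthread_mutex_init(&q.backlog_qlock, NULL);
        pthread_mutex_init(&q.resp_qlock, NULL);
        q.backlog_head.prev = q.backlog_head.next = &q.backlog_head;
        backlog_list_add(&q, &sr);
        return atomic_load(&q.backlog_count) == 1 ? 0 : 1;
    }

Two locks instead of one means completion processing on the response list never contends with submitters queueing onto the backlog.
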
/linux/drivers/crypto/cavium/nitrox/nitrox_isr.c
    32:     struct nitrox_cmdq *cmdq = qvec->cmdq;  [local, in nps_pkt_slc_isr()]
    34:     slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);  [in nps_pkt_slc_isr()]
   337:     qvec->cmdq = &ndev->pkt_inq[qvec->ring];  [in nitrox_register_interrupts()]
/linux/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
   492:     /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,  [in hclge_comm_cmd_send()]
  in hclge_comm_cmd_uninit():
   541:     struct hclge_comm_cmq *cmdq = &hw->cmq;  [local]
   550:     spin_lock_bh(&cmdq->csq.lock);
   551:     spin_lock(&cmdq->crq.lock);
   553:     spin_unlock(&cmdq->crq.lock);
   554:     spin_unlock_bh(&cmdq->csq.lock);
   556:     hclge_comm_free_cmd_desc(&cmdq->csq);
   557:     hclge_comm_free_cmd_desc(&cmdq->crq);
  in hclge_comm_cmd_queue_init():
   563:     struct hclge_comm_cmq *cmdq = &hw->cmq;  [local]
   567:     spin_lock_init(&cmdq->csq.lock);
  [all …]
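
hclge_comm_cmd_uninit() shows a fixed lock order: the send-queue (csq) lock is taken first with BH disabled, then the receive-queue (crq) lock, and both are released in reverse. A sketch of that ordering discipline, with pthread mutexes as stand-ins for the kernel spinlocks:

    #include <pthread.h>

    struct cmq_ring { pthread_mutex_t lock; };
    struct cmq { struct cmq_ring csq, crq; };

    static void cmq_quiesce(struct cmq *cmdq)
    {
        pthread_mutex_lock(&cmdq->csq.lock);    /* outer: command send queue */
        pthread_mutex_lock(&cmdq->crq.lock);    /* inner: command receive queue */
        /* ... mark both rings disabled while holding both locks ... */
        pthread_mutex_unlock(&cmdq->crq.lock);  /* release in reverse order */
        pthread_mutex_unlock(&cmdq->csq.lock);
    }

    int main(void)
    {
        static struct cmq q;

        pthread_mutex_init(&q.csq.lock, NULL);
        pthread_mutex_init(&q.crq.lock, NULL);
        cmq_quiesce(&q);
        return 0;
    }

Taking the two locks in the same order everywhere is what rules out a lock-order inversion between the send and receive rings.
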
/linux/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
  in arm_smmu_get_cmdq():
   374:     struct arm_smmu_cmdq *cmdq = NULL;  [local]
   377:         cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
   379:     return cmdq ?: &smmu->cmdq;
  in arm_smmu_cmdq_needs_busy_polling():
   383:     struct arm_smmu_cmdq *cmdq)  [argument]
   385:     if (cmdq == &smmu->cmdq)
  in arm_smmu_cmdq_build_sync_cmd():
   392:     struct arm_smmu_cmdq *cmdq, u32 prod)  [argument]
   394:     struct arm_smmu_queue *q = &cmdq->q;
   409:     if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
  in __arm_smmu_cmdq_skip_err():
   414:     struct arm_smmu_cmdq *cmdq)  [argument]
   422:     struct arm_smmu_queue *q = &cmdq->q;
  [all …]
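
arm_smmu_get_cmdq() asks an optional implementation hook for a secondary queue and falls back to the device-wide one via `cmdq ?: &smmu->cmdq`. The same fallback in portable C, with the GNU `?:` extension expanded and all types hypothetical:

    #include <stddef.h>

    struct cmdq { int id; };

    struct smmu {
        struct cmdq cmdq;  /* device-wide default queue */
        /* optional implementation hook; may be NULL */
        struct cmdq *(*get_secondary_cmdq)(struct smmu *smmu, int cmd);
    };

    static struct cmdq *get_cmdq(struct smmu *smmu, int cmd)
    {
        struct cmdq *cmdq = NULL;

        if (smmu->get_secondary_cmdq)
            cmdq = smmu->get_secondary_cmdq(smmu, cmd);
        return cmdq ? cmdq : &smmu->cmdq;  /* the kernel spells this cmdq ?: ... */
    }

    static struct cmdq secondary = { .id = 1 };

    /* toy hook: odd commands get the secondary queue */
    static struct cmdq *pick_secondary(struct smmu *smmu, int cmd)
    {
        (void)smmu;
        return (cmd & 1) ? &secondary : NULL;
    }

    int main(void)
    {
        struct smmu s = { .cmdq = { .id = 0 },
                          .get_secondary_cmdq = pick_secondary };

        return (get_cmdq(&s, 3)->id == 1 && get_cmdq(&s, 2)->id == 0) ? 0 : 1;
    }
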
/linux/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
   115:     "This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
   131:  * @cmdq: Command Queue struct
   146:     struct arm_smmu_cmdq cmdq;  [member]
   201:  * struct tegra241_cmdqv - CMDQ-V for SMMUv3
   326:     __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq);  [in tegra241_vintf0_handle_error()]
  in tegra241_cmdqv_get_cmdq():
   397:     /* Use SMMU CMDQ if VINTF0 is uninitialized */
   403:      * balance out traffic on cmdq issuing: each cmdq has its own
   404:      * lock, if all cpus issue cmdlist using the same cmdq, only
   413:     /* Unsupported CMD goes for smmu->cmdq pathway */
   414:     if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
  [all …]
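
The comments in tegra241_cmdqv_get_cmdq() explain why the hardware offers multiple queues: each queue has its own lock, so spreading CPUs across queues keeps submitters from serializing on a single lock. A toy selection policy illustrating the idea; CPU-modulo indexing is an assumption here, not necessarily the driver's real policy:

    #include <stdio.h>

    #define NUM_VCMDQS 4

    struct cmdq { int id; };

    static struct cmdq vcmdqs[NUM_VCMDQS] = { {0}, {1}, {2}, {3} };

    /* NULL means "fall back to the SMMU's default queue" */
    static struct cmdq *pick_cmdq(unsigned int cpu, int cmd_supported)
    {
        unsigned int idx = cpu % NUM_VCMDQS;  /* spread issuers across queues */

        if (!cmd_supported)
            return NULL;  /* unsupported commands take the default pathway */
        return &vcmdqs[idx];
    }

    int main(void)
    {
        for (unsigned int cpu = 0; cpu < 8; cpu++)
            printf("cpu%u -> vcmdq%d\n", cpu, pick_cmdq(cpu, 1)->id);
        return 0;
    }
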
/linux/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
  in nvkm_sec2_fini():
    45:     struct nvkm_falcon_cmdq *cmdq = sec2->cmdq;  [local]
    56:     ret = nvkm_falcon_cmdq_send(cmdq, &cmd, nvkm_sec2_finimsg, sec2,
    68:     nvkm_falcon_cmdq_fini(cmdq);
   119:     nvkm_falcon_cmdq_del(&sec2->cmdq);  [in nvkm_sec2_dtor()]
   159:     (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||  [in nvkm_sec2_new_()]
/linux/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
    50:     nvkm_falcon_cmdq_init(sec2->cmdq, msg.queue_info[i].index,  [in ga102_sec2_initmsg()]
   104:     return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,  [in ga102_sec2_acr_bootstrap_falcon()]
   137:     .cmdq = { 0xc00, 0xc04, 8 },
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
    80:  * GSP consumes the elements from the cmdq and always writes the result
  in r535_gsp_cmdq_push():
   369:     msg->sequence = gsp->cmdq.seq++;
   377:     wptr = *gsp->cmdq.wptr;
   380:     free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
   381:     if (free >= gsp->cmdq.cnt)
   382:         free -= gsp->cmdq.cnt;
   394:     cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
   395:     step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
   401:     if (wptr == gsp->cmdq.cnt)
   408:     nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
  [all …]
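
r535_gsp_cmdq_push() computes free space in the shared-memory ring as `rptr + cnt - wptr - 1`, folding the unsigned wrap-around back into range, and copies in at most `cnt - wptr` elements before wrapping to slot 0. The same arithmetic in isolation, with an illustrative struct in place of the driver's shared-memory pointers:

    #include <stdint.h>

    struct gsp_cmdq { uint32_t rptr, wptr, cnt; };

    /* slots available to the writer; one slot stays empty so that
       full (free == 0) and empty (free == cnt - 1) are distinguishable */
    static uint32_t cmdq_free(const struct gsp_cmdq *q)
    {
        uint32_t free = q->rptr + q->cnt - q->wptr - 1;

        if (free >= q->cnt)   /* fold the unsigned wrap back into range */
            free -= q->cnt;
        return free;
    }

    /* a push copies at most up to the end of the ring, then wraps to 0,
       mirroring step = min_t(u32, free, cnt - wptr) */
    static uint32_t cmdq_step(const struct gsp_cmdq *q, uint32_t free)
    {
        uint32_t to_end = q->cnt - q->wptr;
        return free < to_end ? free : to_end;
    }

    int main(void)
    {
        struct gsp_cmdq q = { .rptr = 5, .wptr = 3, .cnt = 8 };
        uint32_t f = cmdq_free(&q);  /* 1 free slot */

        return (f == 1 && cmdq_step(&q, f) == 1) ? 0 : 1;
    }
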
/linux/drivers/net/ethernet/chelsio/cxgb/sge.c
   168: struct cmdQ {  [struct]
   182:     spinlock_t lock;    /* Lock to protect cmdQ enqueuing */  [argument]
   208: /* Bit flags for cmdQ.status */
   265:     struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;  [member]
   474:     struct cmdQ *q = &sge->cmdQ[0];  [in sched_skb()]
   612: static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)  [in free_cmdQ_buffers()]
   653:     struct cmdQ *q = &sge->cmdQ[i];  [in free_tx_resources()]
   677:     struct cmdQ *q = &sge->cmdQ[i];  [in alloc_tx_resources()]
   706:     sge->cmdQ[0].stop_thres = sge->adapter->params.nports *  [in alloc_tx_resources()]
   750:     setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,  [in configure_sge()]
  [all …]
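
sge.c places each `struct cmdQ` on its own cache line (`____cacheline_aligned_in_smp`) so the hot fields of the two TX queues never false-share a line. A C11 sketch of the layout idea, with `alignas` in place of the kernel macro and only a few representative fields:

    #include <stdalign.h>
    #include <stdio.h>

    #define CACHELINE 64
    #define SGE_CMDQ_N 2

    struct cmdQ {
        alignas(CACHELINE) unsigned int lock;  /* stand-in for spinlock_t */
        unsigned int size;         /* descriptors in the ring */
        unsigned int stop_thres;   /* software backpressure threshold */
    };

    struct sge {
        struct cmdQ cmdQ[SGE_CMDQ_N];  /* each element starts a new cache line */
    };

    int main(void)
    {
        /* the 64-byte alignment pads the struct to a full line */
        printf("cmdQ stride: %zu bytes\n", sizeof(struct cmdQ));
        return 0;
    }
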
/linux/drivers/iommu/arm/Kconfig
   123:     bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
   126:       Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
   127:       CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
   131:       CMDQ-V extension.
/linux/include/dt-bindings/gce/mt8186-gce.h
    82: /* CMDQ: debug */
    85: /* CMDQ: P7: debug */
   348: /* CMDQ sw tokens
   367: /* Notify normal CMDQ there are some secure task done
   372: /* CMDQ use sw token */
   419: /* CMDQ sw tokens END */
/linux/Documentation/devicetree/bindings/mailbox/mediatek,gce-props.yaml
    15:   (CMDQ) mailbox driver is a driver for GCE, implemented using the Linux
    18:   We use mediatek,gce-mailbox.yaml to define the properties for CMDQ mailbox
    19:   driver. A device driver that uses the CMDQ driver to configure its hardware
/linux/drivers/gpu/drm/mediatek/mtk_padding.c
    12: #include <linux/soc/mediatek/mtk-cmdq.h>
    30:  * @cmdq_reg: CMDQ setting of the Padding
    33:  * CMDQ settings, we stored these differences all together.
/linux/drivers/infiniband/hw/hns/hns_roce_trace.h
   175: DECLARE_EVENT_CLASS(cmdq,
   195:     TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
   201: DEFINE_EVENT(cmdq, hns_cmdq_req,
   205: DEFINE_EVENT(cmdq, hns_cmdq_resp,
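
hns_roce_trace.h defines one event class and stamps out request and response events from it, so the TP_printk format lives in a single place. A userspace imitation of that one-class/many-events shape, with printf macros standing in for the kernel's DECLARE_EVENT_CLASS/DEFINE_EVENT:

    #include <stdio.h>

    /* one "class" carries the shared format ... */
    #define DECLARE_EVENT_CLASS(cls)                                          \
        static void trace_class_##cls(const char *ev, unsigned int opcode,    \
                                      unsigned int flag, unsigned int retval) \
        {                                                                     \
            printf("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x\n",           \
                   ev, opcode, flag, retval);                                 \
        }

    /* ... and each event is a thin named wrapper around it */
    #define DEFINE_EVENT(cls, name)                                           \
        static void trace_##name(unsigned int op, unsigned int fl,            \
                                 unsigned int rv)                             \
        {                                                                     \
            trace_class_##cls(#name, op, fl, rv);                             \
        }

    DECLARE_EVENT_CLASS(cmdq)
    DEFINE_EVENT(cmdq, hns_cmdq_req)
    DEFINE_EVENT(cmdq, hns_cmdq_resp)

    int main(void)
    {
        trace_hns_cmdq_req(0x01, 0x0, 0x0);
        trace_hns_cmdq_resp(0x01, 0x0, 0x0);
        return 0;
    }
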
/linux/drivers/atm/fore200e.c
  in fore200e_pca_prom_read():
   553:     struct host_cmdq* cmdq = &fore200e->host_cmdq;  [local]
   554:     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
   559:     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  in fore200e_activate_vcin():
  1222:     struct host_cmdq* cmdq = &fore200e->host_cmdq;  [local]
  1223:     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1230:     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  in fore200e_getstats():
  1666:     struct host_cmdq* cmdq = &fore200e->host_cmdq;  [local]
  1667:     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1683:     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  1712:     struct host_cmdq* cmdq = &fore200e->host_cmdq;
  [all …]
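
Every fore200e command path repeats the same idiom: grab the entry at the queue head, then advance the head with FORE200E_NEXT_ENTRY(), a modulo-size increment. A sketch of that idiom with abbreviated names:

    #include <stdio.h>

    #define QUEUE_SIZE_CMD 8
    #define NEXT_ENTRY(idx, size) ((idx) = ((idx) + 1) % (size))

    struct host_cmdq_entry { int opcode; };

    struct host_cmdq {
        struct host_cmdq_entry host_entry[QUEUE_SIZE_CMD];
        int head;
    };

    static struct host_cmdq_entry *cmdq_next(struct host_cmdq *cmdq)
    {
        struct host_cmdq_entry *entry = &cmdq->host_entry[cmdq->head];

        NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);  /* advance with wrap-around */
        return entry;
    }

    int main(void)
    {
        struct host_cmdq q = { .head = QUEUE_SIZE_CMD - 1 };

        cmdq_next(&q);
        printf("head wrapped to %d\n", q.head);  /* prints 0 */
        return 0;
    }
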
/linux/include/linux/soc/mediatek/mtk-mmsys.h
    10: #include <linux/mailbox/mtk-cmdq-mailbox.h>
    11: #include <linux/soc/mediatek/mtk-cmdq.h>
/linux/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
    45:   - cmdq-sync # CMD_SYNC complete
    91:   interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
/linux/drivers/mmc/core/debugfs.c
  in mmc_err_stats_show():
   261:     [MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
   262:     [MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
   263:     [MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
   265:     [MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timedout",
/linux/include/linux/qed/storage_common.h
    73: /* Scsi Drv CMDQ struct */
    89: /* SCSI RQ/CQ/CMDQ firmware function init parameters */