Lines matching +full:mt8173 +full:- +full:gce

All hits are from the MediaTek CMDQ (GCE) mailbox driver,
drivers/mailbox/mtk-cmdq-mailbox.c. The per-line "in function()" context
markers from the search output are folded into a single "in function():" label
per fragment below, and "..." marks lines the search did not return.

// SPDX-License-Identifier: GPL-2.0
...
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
...
#include <linux/mailbox/mtk-cmdq-mailbox.h>
...
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
in cmdq_convert_gce_addr():
	/* Convert DMA addr (PA or IOVA) to GCE readable addr */
	return addr >> pdata->shift;

in cmdq_revert_gce_addr():
	/* Revert GCE readable addr to DMA addr (PA or IOVA) */
	return (dma_addr_t)addr << pdata->shift;
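These two helpers are inverses only because command buffers are aligned to at
least 1 << shift bytes, so the low bits dropped by the right shift are known to
be zero. A standalone sketch of that invariant (names and test values are mine,
not the driver's):

#include <assert.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct pdata_stub { uint8_t shift; };	/* stand-in for the driver's pdata */

static uint32_t to_gce(dma_addr_t addr, const struct pdata_stub *p)
{
	return addr >> p->shift;	/* low p->shift bits are zero */
}

static dma_addr_t from_gce(uint32_t addr, const struct pdata_stub *p)
{
	return (dma_addr_t)addr << p->shift;
}

int main(void)
{
	struct pdata_stub p = { .shift = 3 };	/* e.g. a 35-bit-address SoC */
	dma_addr_t pa = 0x234567ff8ULL;		/* 34-bit, 8-byte aligned */

	/* Round-trips exactly because pa % (1 << p.shift) == 0. */
	assert(from_gce(to_gce(pa, &p), &p) == pa);
	return 0;
}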
in cmdq_get_shift_pa():
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;

in cmdq_gctl_value_toggle():
	u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;

	if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
		...
	if (cmdq->pdata->sw_ddr_en && ddr_enable)
		...
	writel(val, cmdq->base + GCE_GCTL_VALUE);
in cmdq_thread_suspend():
	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
	...
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		...
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
				      ...)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}
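The elided arguments to readl_poll_timeout_atomic() are the polled value, the
exit condition, and the delay/timeout pair; the atomic variant busy-waits
instead of sleeping, which matters because these helpers run under the channel
spinlock. A sketch of the call shape; the status bit name and the 10 µs
timeout are my assumptions, not taken from the hits above:

#include <linux/iopoll.h>

/*
 * Hypothetical: spin until the thread reports suspended, at most 10 us.
 * Assumes the driver's CMDQ_THR_CURR_STATUS offset and a (guessed)
 * CMDQ_THR_STATUS_SUSPENDED status bit.
 */
static int wait_thread_suspended(void __iomem *thr_base)
{
	u32 status;

	return readl_poll_timeout_atomic(thr_base + CMDQ_THR_CURR_STATUS,
					 status,
					 status & CMDQ_THR_STATUS_SUSPENDED,
					 0, 10);
}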
in cmdq_thread_resume():
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);

in cmdq_init():
	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
	...
	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	...
	writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
in cmdq_thread_reset():
	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
				      ...)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

in cmdq_thread_disable():
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
/* notify GCE to re-fetch commands by setting GCE thread PC */
in cmdq_thread_invalidate_fetched_data():
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
in cmdq_task_insert_into_thread():
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;
	u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
	...
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
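Every CMDQ instruction is one 64-bit word (CMDQ_INST_SIZE is 8, per the
CMDQ_NUM_CMD macro near the top), so chaining works by rewriting the previous
packet's final instruction, in place, with an absolute jump to the new packet,
bracketed by DMA sync calls because the CPU is patching a device-owned buffer.
A sketch of just the encoding step; the CMDQ_JUMP_BY_PA value here is
illustrative, not taken from the header:

#include <stddef.h>
#include <stdint.h>

#define CMDQ_INST_SIZE		8		/* one 64-bit GCE instruction */
#define CMDQ_NUM_CMD(size)	((size) / CMDQ_INST_SIZE)
#define CMDQ_JUMP_BY_PA		0x10000001u	/* illustrative opcode value */

/*
 * Overwrite the last instruction of the previous command buffer with
 * "jump to absolute address gce_addr" (gce_addr already right-shifted
 * into the GCE's address format): opcode in the high word, operand low.
 */
static void chain_to(uint64_t *prev_buf, size_t prev_buf_size, uint32_t gce_addr)
{
	prev_buf[CMDQ_NUM_CMD(prev_buf_size) - 1] =
		(uint64_t)CMDQ_JUMP_BY_PA << 32 | gce_addr;
}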
in cmdq_thread_is_in_wfe():
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;

in cmdq_task_exec_done():
	data.pkt = task->pkt;
	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
in cmdq_task_handle_error():
	struct cmdq_thread *thread = task->thread;
	...
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	...
	next_task = list_first_entry_or_null(&thread->task_busy_list, ...);
	...
	writel(next_task->pa_base >> cmdq->pdata->shift,
	       thread->base + CMDQ_THR_CURR_ADDR);
in cmdq_thread_irq_handler():
	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * ...
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		...
	gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
	curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			...
		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			...
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			...
		}
		...
	}
	...
	if (list_empty(&thread->task_busy_list))
		...
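The completion scan reads the thread's program counter (converted back to a
DMA address) and walks the busy list in submission order: tasks whose buffers
end at or before the PC have finished, while the task whose
[pa_base, pa_base + cmd_buf_size) window contains the PC is still running and,
on an error interrupt, is the one reported with -ENOEXEC. The containment
test, isolated into plain C:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * A task is the currently-executing one when the GCE program counter
 * falls inside its command buffer.
 */
static bool task_contains_pc(uint64_t pa_base, size_t buf_size, uint64_t curr_pa)
{
	return curr_pa >= pa_base && curr_pa < pa_base + buf_size;
}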
in cmdq_irq_handler():
	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		...
	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		...
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
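Note the polarity: CMDQ_CURR_IRQ_STATUS is treated as active-low here, so a
cleared bit means that thread is flagging an interrupt. That is why "nothing
pending" is detected as all masked bits still set, and why the loop uses
for_each_clear_bit() rather than for_each_set_bit(). Reduced to plain C:

#include <stdbool.h>
#include <stdint.h>

/* Active-low per-thread IRQ status: bit == 0 -> thread raised an IRQ. */
static bool any_thread_pending(uint32_t status_reg, uint32_t irq_mask)
{
	uint32_t irq_status = status_reg & irq_mask;

	/* Equivalent to the driver's !(irq_status ^ irq_mask) "none" test. */
	return irq_status != irq_mask;
}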
in cmdq_runtime_resume():
	ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);

in cmdq_runtime_suspend():
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);

in cmdq_suspend():
	cmdq->suspended = true;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			...
		}
	}

in cmdq_resume():
	cmdq->suspended = false;

in cmdq_remove():
	cmdq_runtime_suspend(&pdev->dev);
	...
	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
in cmdq_mbox_send_data():
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	...
	WARN_ON(cmdq->suspended);
	...
		return -ENOMEM;

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		...
		gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
		writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
		gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
		writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		...
		gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
		curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
		gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
		end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);

		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    ...) {
			writel(task->pa_base >> cmdq->pdata->shift,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			...
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);
		...
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);
in cmdq_mbox_shutdown():
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	...
	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		...
	if (list_empty(&thread->task_busy_list))
		...
	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		...
	}
	...
	/*
	 * The thread->task_busy_list empty means thread already disable. The
	 * ...
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
in cmdq_mbox_flush():
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	...
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	...
	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	...
	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		...
	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		data.sta = -ECONNABORTED;
		data.pkt = task->pkt;
		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		...
	}
	...
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
	...
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      ...)) {
		dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
			(u32)(thread->base - cmdq->base));
		...
		return -EFAULT;
	}
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
in cmdq_xlate():
	int ind = sp->args[0];
	...
	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
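So a consumer's two-cell mbox specifier is decoded here: cell 0 selects the
GCE thread (bounds-checked against num_chans) and cell 1 becomes that thread's
priority. On the client side the specifier is consumed transparently through
the standard mailbox API; a hypothetical kernel client (all names mine) would
do roughly:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

struct my_client {
	struct mbox_client cl;	/* embedded, as mailbox clients usually do */
	struct mbox_chan *chan;
};

/*
 * Hypothetical: bind to the first "mboxes" entry of dev's DT node; the
 * (index, priority) cells are handled by cmdq_xlate(), not here.
 */
static int my_client_bind(struct device *dev, struct my_client *c)
{
	c->cl.dev = dev;
	c->cl.tx_block = false;	/* completion arrives via rx_callback */
	c->chan = mbox_request_channel(&c->cl, 0);
	return PTR_ERR_OR_ZERO(c->chan);
}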
in cmdq_get_clocks():
	static const char * const gce_name = "gce";
	struct device_node *node, *parent = dev->of_node->parent;
	...
	cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
				    sizeof(*cmdq->clocks), GFP_KERNEL);
	if (!cmdq->clocks)
		return -ENOMEM;

	if (cmdq->pdata->gce_num == 1) {
		clks = &cmdq->clocks[0];

		clks->id = gce_name;
		clks->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clks->clk))
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce clock\n");
		...
	}

	/*
	 * If there is more than one GCE, get the clocks for the others too,
	 * as the clock of the main GCE must be enabled for additional IPs
	 * ...
	 */
	...
		if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
			...
		clks = &cmdq->clocks[alias_id];

		clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
		if (!clks->id) {
			...
			return -ENOMEM;
		}

		clks->clk = of_clk_get(node, 0);
		if (IS_ERR(clks->clk)) {
			...
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce%d clock\n", alias_id);
		}
in cmdq_probe():
	struct device *dev = &pdev->dev;
	...
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		...
		return -EINVAL;
	}

	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);
	...
		dev, cmdq->base, cmdq->irq);
	...
		     DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
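	/*
	 * Editor's note, not driver code: the truncated call above sets a
	 * DMA mask of 32 register bits plus pdata->shift implicit zero
	 * bits, e.g. DMA_BIT_MASK(35) == 0x7ffffffffULL on a shift-3 SoC,
	 * matching the address packing in cmdq_convert_gce_addr().
	 */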
	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;
	...
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
				    sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				       ...;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}
	...
	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
	...
	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       ...);
	...
	err = devm_mbox_controller_register(dev, &cmdq->mbox);
in the of_device_id match table:
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
	{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
	...
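Each compatible points at per-SoC platform data whose fields drive everything
above: the address shift, thread count, clock count, and software-control
quirks. The defining header is not part of these search hits, but from the
fields the code dereferences, the structure is roughly as follows (a
reconstruction; exact types and ordering may differ):

#include <linux/types.h>

/* Reconstructed from the pdata-> uses in the hits above, not copied verbatim. */
struct gce_plat {
	u32 thread_nr;		/* GCE hardware threads -> mbox channels */
	u8 shift;		/* dma_addr >> shift fits a 32-bit GCE register */
	bool control_by_sw;	/* set GCE_CTRL_BY_SW in GCE_GCTL_VALUE */
	bool sw_ddr_en;		/* GCE_DDR_EN handling, see cmdq_gctl_value_toggle() */
	u32 gce_num;		/* GCE instances sharing the clock bulk */
};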