1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // Copyright (c) 2018 MediaTek Inc. 4 5 #include <linux/bitops.h> 6 #include <linux/clk.h> 7 #include <linux/clk-provider.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/errno.h> 10 #include <linux/interrupt.h> 11 #include <linux/io.h> 12 #include <linux/iopoll.h> 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/platform_device.h> 16 #include <linux/pm_runtime.h> 17 #include <linux/sizes.h> 18 #include <linux/mailbox_controller.h> 19 #include <linux/mailbox/mtk-cmdq-mailbox.h> 20 #include <linux/of.h> 21 22 #define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS 100 23 24 #define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT) 25 #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE) 26 27 #define CMDQ_CURR_IRQ_STATUS 0x10 28 #define CMDQ_SYNC_TOKEN_UPDATE 0x68 29 #define CMDQ_THR_SLOT_CYCLES 0x30 30 #define CMDQ_THR_BASE 0x100 31 #define CMDQ_THR_SIZE 0x80 32 #define CMDQ_THR_WARM_RESET 0x00 33 #define CMDQ_THR_ENABLE_TASK 0x04 34 #define CMDQ_THR_SUSPEND_TASK 0x08 35 #define CMDQ_THR_CURR_STATUS 0x0c 36 #define CMDQ_THR_IRQ_STATUS 0x10 37 #define CMDQ_THR_IRQ_ENABLE 0x14 38 #define CMDQ_THR_CURR_ADDR 0x20 39 #define CMDQ_THR_END_ADDR 0x24 40 #define CMDQ_THR_WAIT_TOKEN 0x30 41 #define CMDQ_THR_PRIORITY 0x40 42 43 #define GCE_GCTL_VALUE 0x48 44 #define GCE_CTRL_BY_SW GENMASK(2, 0) 45 #define GCE_DDR_EN GENMASK(18, 16) 46 47 #define GCE_VM_ID_MAP(n) (0x5018 + (n) / 10 * 4) 48 #define GCE_VM_ID_MAP_THR_FLD_SHIFT(n) ((n) % 10 * 3) 49 #define GCE_VM_ID_MAP_HOST_VM GENMASK(2, 0) 50 #define GCE_VM_CPR_GSIZE 0x50c4 51 #define GCE_VM_CPR_GSIZE_FLD_SHIFT(vm_id) ((vm_id) * 4) 52 #define GCE_VM_CPR_GSIZE_MAX GENMASK(3, 0) 53 54 #define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200 55 #define CMDQ_THR_ENABLED 0x1 56 #define CMDQ_THR_DISABLED 0x0 57 #define CMDQ_THR_SUSPEND 0x1 58 #define CMDQ_THR_RESUME 0x0 59 #define CMDQ_THR_STATUS_SUSPENDED BIT(1) 60 #define CMDQ_THR_DO_WARM_RESET BIT(0) 61 #define CMDQ_THR_IRQ_DONE 0x1 62 #define CMDQ_THR_IRQ_ERROR 0x12 63 #define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE) 64 #define CMDQ_THR_IS_WAITING BIT(31) 65 66 #define CMDQ_JUMP_BY_OFFSET 0x10000000 67 #define CMDQ_JUMP_BY_PA 0x10000001 68 69 struct cmdq_thread { 70 struct mbox_chan *chan; 71 void __iomem *base; 72 struct list_head task_busy_list; 73 u32 priority; 74 }; 75 76 struct cmdq_task { 77 struct cmdq *cmdq; 78 struct list_head list_entry; 79 dma_addr_t pa_base; 80 struct cmdq_thread *thread; 81 struct cmdq_pkt *pkt; /* the packet sent from mailbox client */ 82 }; 83 84 struct cmdq { 85 struct mbox_controller mbox; 86 void __iomem *base; 87 int irq; 88 u32 irq_mask; 89 const struct gce_plat *pdata; 90 struct cmdq_thread *thread; 91 struct clk_bulk_data *clocks; 92 bool suspended; 93 }; 94 95 struct gce_plat { 96 u32 thread_nr; 97 u8 shift; 98 dma_addr_t mminfra_offset; 99 bool control_by_sw; 100 bool sw_ddr_en; 101 bool gce_vm; 102 u32 gce_num; 103 }; 104 105 static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata) 106 { 107 /* Convert DMA addr (PA or IOVA) to GCE readable addr */ 108 return (addr + pdata->mminfra_offset) >> pdata->shift; 109 } 110 111 static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata) 112 { 113 /* Revert GCE readable addr to DMA addr (PA or IOVA) */ 114 return ((dma_addr_t)addr << pdata->shift) - pdata->mminfra_offset; 115 } 116 117 void cmdq_get_mbox_priv(struct mbox_chan *chan, struct cmdq_mbox_priv *priv) 118 { 119 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); 120 121 priv->shift_pa = cmdq->pdata->shift; 122 priv->mminfra_offset = cmdq->pdata->mminfra_offset; 123 } 124 EXPORT_SYMBOL(cmdq_get_mbox_priv); 125 126 u8 cmdq_get_shift_pa(struct mbox_chan *chan) 127 { 128 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); 129 130 return cmdq->pdata->shift; 131 } 132 EXPORT_SYMBOL(cmdq_get_shift_pa); 133 134 static void cmdq_vm_init(struct cmdq *cmdq) 135 { 136 int i; 137 u32 vm_cpr_gsize = 0, vm_id_map = 0; 138 u32 *vm_map = NULL; 139 140 if (!cmdq->pdata->gce_vm) 141 return; 142 143 vm_map = kcalloc(cmdq->pdata->thread_nr, sizeof(*vm_map), GFP_KERNEL); 144 if (!vm_map) 145 return; 146 147 /* only configure the max CPR SRAM size to host vm (vm_id = 0) currently */ 148 vm_cpr_gsize = GCE_VM_CPR_GSIZE_MAX << GCE_VM_CPR_GSIZE_FLD_SHIFT(0); 149 150 /* set all thread mapping to host vm currently */ 151 for (i = 0; i < cmdq->pdata->thread_nr; i++) 152 vm_map[i] = GCE_VM_ID_MAP_HOST_VM << GCE_VM_ID_MAP_THR_FLD_SHIFT(i); 153 154 /* set the amount of CPR SRAM to allocate to each VM */ 155 writel(vm_cpr_gsize, cmdq->base + GCE_VM_CPR_GSIZE); 156 157 /* config CPR_GSIZE before setting VM_ID_MAP to avoid data leakage */ 158 for (i = 0; i < cmdq->pdata->thread_nr; i++) { 159 vm_id_map |= vm_map[i]; 160 /* config every 10 threads, e.g., thread id=0~9, 10~19, ..., into one register */ 161 if ((i + 1) % 10 == 0) { 162 writel(vm_id_map, cmdq->base + GCE_VM_ID_MAP(i)); 163 vm_id_map = 0; 164 } 165 } 166 /* config remaining threads settings */ 167 if (cmdq->pdata->thread_nr % 10 != 0) 168 writel(vm_id_map, cmdq->base + GCE_VM_ID_MAP(cmdq->pdata->thread_nr - 1)); 169 170 kfree(vm_map); 171 } 172 173 static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable) 174 { 175 u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0; 176 177 if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en) 178 return; 179 180 if (cmdq->pdata->sw_ddr_en && ddr_enable) 181 val |= GCE_DDR_EN; 182 183 writel(val, cmdq->base + GCE_GCTL_VALUE); 184 } 185 186 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) 187 { 188 u32 status; 189 190 writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK); 191 192 /* If already disabled, treat as suspended successful. */ 193 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED)) 194 return 0; 195 196 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS, 197 status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) { 198 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n", 199 (u32)(thread->base - cmdq->base)); 200 return -EFAULT; 201 } 202 203 return 0; 204 } 205 206 static void cmdq_thread_resume(struct cmdq_thread *thread) 207 { 208 writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK); 209 } 210 211 static void cmdq_init(struct cmdq *cmdq) 212 { 213 int i; 214 215 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); 216 217 cmdq_vm_init(cmdq); 218 cmdq_gctl_value_toggle(cmdq, true); 219 220 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); 221 for (i = 0; i <= CMDQ_MAX_EVENT; i++) 222 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE); 223 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); 224 } 225 226 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread) 227 { 228 u32 warm_reset; 229 230 writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET); 231 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET, 232 warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET), 233 0, 10)) { 234 dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n", 235 (u32)(thread->base - cmdq->base)); 236 return -EFAULT; 237 } 238 239 return 0; 240 } 241 242 static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread) 243 { 244 cmdq_thread_reset(cmdq, thread); 245 writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK); 246 } 247 248 /* notify GCE to re-fetch commands by setting GCE thread PC */ 249 static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread) 250 { 251 writel(readl(thread->base + CMDQ_THR_CURR_ADDR), 252 thread->base + CMDQ_THR_CURR_ADDR); 253 } 254 255 static void cmdq_task_insert_into_thread(struct cmdq_task *task) 256 { 257 struct device *dev = task->cmdq->mbox.dev; 258 struct cmdq_thread *thread = task->thread; 259 struct cmdq_task *prev_task = list_last_entry( 260 &thread->task_busy_list, typeof(*task), list_entry); 261 u64 *prev_task_base = prev_task->pkt->va_base; 262 u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata); 263 264 /* let previous task jump to this task */ 265 dma_sync_single_for_cpu(dev, prev_task->pa_base, 266 prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); 267 prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr; 268 dma_sync_single_for_device(dev, prev_task->pa_base, 269 prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE); 270 271 cmdq_thread_invalidate_fetched_data(thread); 272 } 273 274 static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread) 275 { 276 return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING; 277 } 278 279 static void cmdq_task_exec_done(struct cmdq_task *task, int sta) 280 { 281 struct cmdq_cb_data data; 282 283 data.sta = sta; 284 data.pkt = task->pkt; 285 mbox_chan_received_data(task->thread->chan, &data); 286 287 list_del(&task->list_entry); 288 } 289 290 static void cmdq_task_handle_error(struct cmdq_task *task) 291 { 292 struct cmdq_thread *thread = task->thread; 293 struct cmdq_task *next_task; 294 struct cmdq *cmdq = task->cmdq; 295 296 dev_err(cmdq->mbox.dev, "task 0x%p error\n", task); 297 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); 298 next_task = list_first_entry_or_null(&thread->task_busy_list, 299 struct cmdq_task, list_entry); 300 if (next_task) 301 writel(next_task->pa_base >> cmdq->pdata->shift, 302 thread->base + CMDQ_THR_CURR_ADDR); 303 cmdq_thread_resume(thread); 304 } 305 306 static void cmdq_thread_irq_handler(struct cmdq *cmdq, 307 struct cmdq_thread *thread) 308 { 309 struct cmdq_task *task, *tmp, *curr_task = NULL; 310 u32 irq_flag, gce_addr; 311 dma_addr_t curr_pa, task_end_pa; 312 bool err; 313 314 irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS); 315 writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS); 316 317 /* 318 * When ISR call this function, another CPU core could run 319 * "release task" right before we acquire the spin lock, and thus 320 * reset / disable this GCE thread, so we need to check the enable 321 * bit of this GCE thread. 322 */ 323 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED)) 324 return; 325 326 if (irq_flag & CMDQ_THR_IRQ_ERROR) 327 err = true; 328 else if (irq_flag & CMDQ_THR_IRQ_DONE) 329 err = false; 330 else 331 return; 332 333 gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); 334 curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 335 336 list_for_each_entry_safe(task, tmp, &thread->task_busy_list, 337 list_entry) { 338 task_end_pa = task->pa_base + task->pkt->cmd_buf_size; 339 if (curr_pa >= task->pa_base && curr_pa < task_end_pa) 340 curr_task = task; 341 342 if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) { 343 cmdq_task_exec_done(task, 0); 344 kfree(task); 345 } else if (err) { 346 cmdq_task_exec_done(task, -ENOEXEC); 347 cmdq_task_handle_error(curr_task); 348 kfree(task); 349 } 350 351 if (curr_task) 352 break; 353 } 354 355 if (list_empty(&thread->task_busy_list)) 356 cmdq_thread_disable(cmdq, thread); 357 } 358 359 static irqreturn_t cmdq_irq_handler(int irq, void *dev) 360 { 361 struct cmdq *cmdq = dev; 362 unsigned long irq_status, flags = 0L; 363 int bit; 364 365 irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask; 366 if (!(irq_status ^ cmdq->irq_mask)) 367 return IRQ_NONE; 368 369 for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) { 370 struct cmdq_thread *thread = &cmdq->thread[bit]; 371 372 spin_lock_irqsave(&thread->chan->lock, flags); 373 cmdq_thread_irq_handler(cmdq, thread); 374 spin_unlock_irqrestore(&thread->chan->lock, flags); 375 } 376 377 pm_runtime_mark_last_busy(cmdq->mbox.dev); 378 379 return IRQ_HANDLED; 380 } 381 382 static int cmdq_runtime_resume(struct device *dev) 383 { 384 struct cmdq *cmdq = dev_get_drvdata(dev); 385 int ret; 386 387 ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); 388 if (ret) 389 return ret; 390 391 cmdq_gctl_value_toggle(cmdq, true); 392 return 0; 393 } 394 395 static int cmdq_runtime_suspend(struct device *dev) 396 { 397 struct cmdq *cmdq = dev_get_drvdata(dev); 398 399 cmdq_gctl_value_toggle(cmdq, false); 400 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); 401 return 0; 402 } 403 404 static int cmdq_suspend(struct device *dev) 405 { 406 struct cmdq *cmdq = dev_get_drvdata(dev); 407 struct cmdq_thread *thread; 408 int i; 409 bool task_running = false; 410 411 cmdq->suspended = true; 412 413 for (i = 0; i < cmdq->pdata->thread_nr; i++) { 414 thread = &cmdq->thread[i]; 415 if (!list_empty(&thread->task_busy_list)) { 416 task_running = true; 417 break; 418 } 419 } 420 421 if (task_running) 422 dev_warn(dev, "exist running task(s) in suspend\n"); 423 424 return pm_runtime_force_suspend(dev); 425 } 426 427 static int cmdq_resume(struct device *dev) 428 { 429 struct cmdq *cmdq = dev_get_drvdata(dev); 430 431 WARN_ON(pm_runtime_force_resume(dev)); 432 cmdq->suspended = false; 433 434 return 0; 435 } 436 437 static void cmdq_remove(struct platform_device *pdev) 438 { 439 struct cmdq *cmdq = platform_get_drvdata(pdev); 440 441 if (!IS_ENABLED(CONFIG_PM)) 442 cmdq_runtime_suspend(&pdev->dev); 443 444 clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks); 445 } 446 447 static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) 448 { 449 struct cmdq_pkt *pkt = (struct cmdq_pkt *)data; 450 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; 451 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); 452 struct cmdq_task *task; 453 u32 gce_addr; 454 dma_addr_t curr_pa, end_pa; 455 456 /* Client should not flush new tasks if suspended. */ 457 WARN_ON(cmdq->suspended); 458 459 task = kzalloc(sizeof(*task), GFP_ATOMIC); 460 if (!task) 461 return -ENOMEM; 462 463 task->cmdq = cmdq; 464 INIT_LIST_HEAD(&task->list_entry); 465 task->pa_base = pkt->pa_base; 466 task->thread = thread; 467 task->pkt = pkt; 468 469 if (list_empty(&thread->task_busy_list)) { 470 /* 471 * The thread reset will clear thread related register to 0, 472 * including pc, end, priority, irq, suspend and enable. Thus 473 * set CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will enable 474 * thread and make it running. 475 */ 476 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0); 477 478 gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata); 479 writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR); 480 gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata); 481 writel(gce_addr, thread->base + CMDQ_THR_END_ADDR); 482 483 writel(thread->priority, thread->base + CMDQ_THR_PRIORITY); 484 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE); 485 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK); 486 } else { 487 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); 488 gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR); 489 curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 490 gce_addr = readl(thread->base + CMDQ_THR_END_ADDR); 491 end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata); 492 /* check boundary */ 493 if (curr_pa == end_pa - CMDQ_INST_SIZE || 494 curr_pa == end_pa) { 495 /* set to this task directly */ 496 writel(task->pa_base >> cmdq->pdata->shift, 497 thread->base + CMDQ_THR_CURR_ADDR); 498 } else { 499 cmdq_task_insert_into_thread(task); 500 smp_mb(); /* modify jump before enable thread */ 501 } 502 writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift, 503 thread->base + CMDQ_THR_END_ADDR); 504 cmdq_thread_resume(thread); 505 } 506 list_move_tail(&task->list_entry, &thread->task_busy_list); 507 508 return 0; 509 } 510 511 static int cmdq_mbox_startup(struct mbox_chan *chan) 512 { 513 return 0; 514 } 515 516 static void cmdq_mbox_shutdown(struct mbox_chan *chan) 517 { 518 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; 519 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); 520 struct cmdq_task *task, *tmp; 521 unsigned long flags; 522 523 WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0); 524 525 spin_lock_irqsave(&thread->chan->lock, flags); 526 if (list_empty(&thread->task_busy_list)) 527 goto done; 528 529 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); 530 531 /* make sure executed tasks have success callback */ 532 cmdq_thread_irq_handler(cmdq, thread); 533 if (list_empty(&thread->task_busy_list)) 534 goto done; 535 536 list_for_each_entry_safe(task, tmp, &thread->task_busy_list, 537 list_entry) { 538 cmdq_task_exec_done(task, -ECONNABORTED); 539 kfree(task); 540 } 541 542 cmdq_thread_disable(cmdq, thread); 543 544 done: 545 /* 546 * The thread->task_busy_list empty means thread already disable. The 547 * cmdq_mbox_send_data() always reset thread which clear disable and 548 * suspend statue when first pkt send to channel, so there is no need 549 * to do any operation here, only unlock and leave. 550 */ 551 spin_unlock_irqrestore(&thread->chan->lock, flags); 552 553 pm_runtime_mark_last_busy(cmdq->mbox.dev); 554 pm_runtime_put_autosuspend(cmdq->mbox.dev); 555 } 556 557 static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) 558 { 559 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv; 560 struct cmdq_cb_data data; 561 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev); 562 struct cmdq_task *task, *tmp; 563 unsigned long flags; 564 u32 enable; 565 int ret; 566 567 ret = pm_runtime_get_sync(cmdq->mbox.dev); 568 if (ret < 0) 569 return ret; 570 571 spin_lock_irqsave(&thread->chan->lock, flags); 572 if (list_empty(&thread->task_busy_list)) 573 goto out; 574 575 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); 576 if (!cmdq_thread_is_in_wfe(thread)) 577 goto wait; 578 579 list_for_each_entry_safe(task, tmp, &thread->task_busy_list, 580 list_entry) { 581 data.sta = -ECONNABORTED; 582 data.pkt = task->pkt; 583 mbox_chan_received_data(task->thread->chan, &data); 584 list_del(&task->list_entry); 585 kfree(task); 586 } 587 588 cmdq_thread_resume(thread); 589 cmdq_thread_disable(cmdq, thread); 590 591 out: 592 spin_unlock_irqrestore(&thread->chan->lock, flags); 593 pm_runtime_mark_last_busy(cmdq->mbox.dev); 594 pm_runtime_put_autosuspend(cmdq->mbox.dev); 595 596 return 0; 597 598 wait: 599 cmdq_thread_resume(thread); 600 spin_unlock_irqrestore(&thread->chan->lock, flags); 601 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK, 602 enable, enable == 0, 1, timeout)) { 603 dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n", 604 (u32)(thread->base - cmdq->base)); 605 606 return -EFAULT; 607 } 608 pm_runtime_mark_last_busy(cmdq->mbox.dev); 609 pm_runtime_put_autosuspend(cmdq->mbox.dev); 610 return 0; 611 } 612 613 static const struct mbox_chan_ops cmdq_mbox_chan_ops = { 614 .send_data = cmdq_mbox_send_data, 615 .startup = cmdq_mbox_startup, 616 .shutdown = cmdq_mbox_shutdown, 617 .flush = cmdq_mbox_flush, 618 }; 619 620 static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox, 621 const struct of_phandle_args *sp) 622 { 623 int ind = sp->args[0]; 624 struct cmdq_thread *thread; 625 626 if (ind >= mbox->num_chans) 627 return ERR_PTR(-EINVAL); 628 629 thread = (struct cmdq_thread *)mbox->chans[ind].con_priv; 630 thread->priority = sp->args[1]; 631 thread->chan = &mbox->chans[ind]; 632 633 return &mbox->chans[ind]; 634 } 635 636 static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq) 637 { 638 static const char * const gce_name = "gce"; 639 struct device_node *parent = dev->of_node->parent; 640 struct clk_bulk_data *clks; 641 642 cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num, 643 sizeof(*cmdq->clocks), GFP_KERNEL); 644 if (!cmdq->clocks) 645 return -ENOMEM; 646 647 if (cmdq->pdata->gce_num == 1) { 648 clks = &cmdq->clocks[0]; 649 650 clks->id = gce_name; 651 clks->clk = devm_clk_get(dev, NULL); 652 if (IS_ERR(clks->clk)) 653 return dev_err_probe(dev, PTR_ERR(clks->clk), 654 "failed to get gce clock\n"); 655 656 return 0; 657 } 658 659 /* 660 * If there is more than one GCE, get the clocks for the others too, 661 * as the clock of the main GCE must be enabled for additional IPs 662 * to be reachable. 663 */ 664 for_each_child_of_node_scoped(parent, node) { 665 int alias_id = of_alias_get_id(node, gce_name); 666 667 if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num) 668 continue; 669 670 clks = &cmdq->clocks[alias_id]; 671 672 clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id); 673 if (!clks->id) 674 return -ENOMEM; 675 676 clks->clk = of_clk_get(node, 0); 677 if (IS_ERR(clks->clk)) 678 return dev_err_probe(dev, PTR_ERR(clks->clk), 679 "failed to get gce%d clock\n", alias_id); 680 } 681 682 return 0; 683 } 684 685 static int cmdq_probe(struct platform_device *pdev) 686 { 687 struct device *dev = &pdev->dev; 688 struct cmdq *cmdq; 689 int err, i; 690 691 cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL); 692 if (!cmdq) 693 return -ENOMEM; 694 695 cmdq->base = devm_platform_ioremap_resource(pdev, 0); 696 if (IS_ERR(cmdq->base)) 697 return PTR_ERR(cmdq->base); 698 699 cmdq->irq = platform_get_irq(pdev, 0); 700 if (cmdq->irq < 0) 701 return cmdq->irq; 702 703 cmdq->pdata = device_get_match_data(dev); 704 if (!cmdq->pdata) { 705 dev_err(dev, "failed to get match data\n"); 706 return -EINVAL; 707 } 708 709 cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0); 710 711 dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n", 712 dev, cmdq->base, cmdq->irq); 713 714 err = cmdq_get_clocks(dev, cmdq); 715 if (err) 716 return err; 717 718 dma_set_coherent_mask(dev, 719 DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift)); 720 721 cmdq->mbox.dev = dev; 722 cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr, 723 sizeof(*cmdq->mbox.chans), GFP_KERNEL); 724 if (!cmdq->mbox.chans) 725 return -ENOMEM; 726 727 cmdq->mbox.num_chans = cmdq->pdata->thread_nr; 728 cmdq->mbox.ops = &cmdq_mbox_chan_ops; 729 cmdq->mbox.of_xlate = cmdq_xlate; 730 731 /* make use of TXDONE_BY_ACK */ 732 cmdq->mbox.txdone_irq = false; 733 cmdq->mbox.txdone_poll = false; 734 735 cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr, 736 sizeof(*cmdq->thread), GFP_KERNEL); 737 if (!cmdq->thread) 738 return -ENOMEM; 739 740 for (i = 0; i < cmdq->pdata->thread_nr; i++) { 741 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE + 742 CMDQ_THR_SIZE * i; 743 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list); 744 cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i]; 745 } 746 747 platform_set_drvdata(pdev, cmdq); 748 749 WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks)); 750 751 cmdq_init(cmdq); 752 753 err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED, 754 "mtk_cmdq", cmdq); 755 if (err < 0) { 756 dev_err(dev, "failed to register ISR (%d)\n", err); 757 return err; 758 } 759 760 /* If Runtime PM is not available enable the clocks now. */ 761 if (!IS_ENABLED(CONFIG_PM)) { 762 err = cmdq_runtime_resume(dev); 763 if (err) 764 return err; 765 } 766 767 err = devm_pm_runtime_enable(dev); 768 if (err) 769 return err; 770 771 pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS); 772 pm_runtime_use_autosuspend(dev); 773 774 err = devm_mbox_controller_register(dev, &cmdq->mbox); 775 if (err < 0) { 776 dev_err(dev, "failed to register mailbox: %d\n", err); 777 return err; 778 } 779 780 return 0; 781 } 782 783 static const struct dev_pm_ops cmdq_pm_ops = { 784 .suspend = cmdq_suspend, 785 .resume = cmdq_resume, 786 SET_RUNTIME_PM_OPS(cmdq_runtime_suspend, 787 cmdq_runtime_resume, NULL) 788 }; 789 790 static const struct gce_plat gce_plat_mt6779 = { 791 .thread_nr = 24, 792 .shift = 3, 793 .control_by_sw = false, 794 .gce_num = 1 795 }; 796 797 static const struct gce_plat gce_plat_mt8173 = { 798 .thread_nr = 16, 799 .shift = 0, 800 .control_by_sw = false, 801 .gce_num = 1 802 }; 803 804 static const struct gce_plat gce_plat_mt8183 = { 805 .thread_nr = 24, 806 .shift = 0, 807 .control_by_sw = false, 808 .gce_num = 1 809 }; 810 811 static const struct gce_plat gce_plat_mt8186 = { 812 .thread_nr = 24, 813 .shift = 3, 814 .control_by_sw = true, 815 .sw_ddr_en = true, 816 .gce_num = 1 817 }; 818 819 static const struct gce_plat gce_plat_mt8188 = { 820 .thread_nr = 32, 821 .shift = 3, 822 .control_by_sw = true, 823 .gce_num = 2 824 }; 825 826 static const struct gce_plat gce_plat_mt8192 = { 827 .thread_nr = 24, 828 .shift = 3, 829 .control_by_sw = true, 830 .gce_num = 1 831 }; 832 833 static const struct gce_plat gce_plat_mt8195 = { 834 .thread_nr = 24, 835 .shift = 3, 836 .control_by_sw = true, 837 .gce_num = 2 838 }; 839 840 static const struct gce_plat gce_plat_mt8196 = { 841 .thread_nr = 32, 842 .shift = 3, 843 .mminfra_offset = SZ_2G, 844 .control_by_sw = true, 845 .sw_ddr_en = true, 846 .gce_vm = true, 847 .gce_num = 2 848 }; 849 850 static const struct of_device_id cmdq_of_ids[] = { 851 {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779}, 852 {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173}, 853 {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183}, 854 {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186}, 855 {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188}, 856 {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192}, 857 {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195}, 858 {.compatible = "mediatek,mt8196-gce", .data = (void *)&gce_plat_mt8196}, 859 {} 860 }; 861 MODULE_DEVICE_TABLE(of, cmdq_of_ids); 862 863 static struct platform_driver cmdq_drv = { 864 .probe = cmdq_probe, 865 .remove = cmdq_remove, 866 .driver = { 867 .name = "mtk_cmdq", 868 .pm = &cmdq_pm_ops, 869 .of_match_table = cmdq_of_ids, 870 } 871 }; 872 873 static int __init cmdq_drv_init(void) 874 { 875 return platform_driver_register(&cmdq_drv); 876 } 877 878 static void __exit cmdq_drv_exit(void) 879 { 880 platform_driver_unregister(&cmdq_drv); 881 } 882 883 subsys_initcall(cmdq_drv_init); 884 module_exit(cmdq_drv_exit); 885 886 MODULE_DESCRIPTION("Mediatek Command Queue(CMDQ) Mailbox driver"); 887 MODULE_LICENSE("GPL v2"); 888