// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS  100

#define CMDQ_OP_CODE_MASK               (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)                 (t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS            0x10
#define CMDQ_SYNC_TOKEN_UPDATE          0x68
#define CMDQ_THR_SLOT_CYCLES            0x30
#define CMDQ_THR_BASE                   0x100
#define CMDQ_THR_SIZE                   0x80
#define CMDQ_THR_WARM_RESET             0x00
#define CMDQ_THR_ENABLE_TASK            0x04
#define CMDQ_THR_SUSPEND_TASK           0x08
#define CMDQ_THR_CURR_STATUS            0x0c
#define CMDQ_THR_IRQ_STATUS             0x10
#define CMDQ_THR_IRQ_ENABLE             0x14
#define CMDQ_THR_CURR_ADDR              0x20
#define CMDQ_THR_END_ADDR               0x24
#define CMDQ_THR_WAIT_TOKEN             0x30
#define CMDQ_THR_PRIORITY               0x40

#define GCE_GCTL_VALUE                  0x48
#define GCE_CTRL_BY_SW                  GENMASK(2, 0)
#define GCE_DDR_EN                      GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES     0x3200
#define CMDQ_THR_ENABLED                0x1
#define CMDQ_THR_DISABLED               0x0
#define CMDQ_THR_SUSPEND                0x1
#define CMDQ_THR_RESUME                 0x0
#define CMDQ_THR_STATUS_SUSPENDED       BIT(1)
#define CMDQ_THR_DO_WARM_RESET          BIT(0)
#define CMDQ_THR_IRQ_DONE               0x1
#define CMDQ_THR_IRQ_ERROR              0x12
#define CMDQ_THR_IRQ_EN                 (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING             BIT(31)

#define CMDQ_JUMP_BY_OFFSET             0x10000000
#define CMDQ_JUMP_BY_PA                 0x10000001

struct cmdq_thread {
        struct mbox_chan        *chan;
        void __iomem            *base;
        struct list_head        task_busy_list;
        u32                     priority;
};

struct cmdq_task {
        struct cmdq             *cmdq;
        struct list_head        list_entry;
        dma_addr_t              pa_base;
        struct cmdq_thread      *thread;
        struct cmdq_pkt         *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
        struct mbox_controller  mbox;
        void __iomem            *base;
        int                     irq;
        u32                     irq_mask;
        const struct gce_plat   *pdata;
        struct cmdq_thread      *thread;
        struct clk_bulk_data    *clocks;
        bool                    suspended;
};

struct gce_plat {
        u32 thread_nr;
        u8 shift;
        bool control_by_sw;
        bool sw_ddr_en;
        u32 gce_num;
};

static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
        WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

        if (enable)
                writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
        else
                writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);

        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
        struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

        return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
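/*
 * Low-level GCE thread control helpers.
 *
 * A thread is suspended by writing CMDQ_THR_SUSPEND to its SUSPEND_TASK
 * register and polling CMDQ_THR_CURR_STATUS until the hardware reports
 * CMDQ_THR_STATUS_SUSPENDED; resuming simply writes CMDQ_THR_RESUME back.
 */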
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 status;

        writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

        /* If the thread is already disabled, treat the suspend as successful. */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return 0;

        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                        status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
                dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
        writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

static void cmdq_init(struct cmdq *cmdq)
{
        int i;
        u32 gctl_regval = 0;

        WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
        if (cmdq->pdata->control_by_sw)
                gctl_regval = GCE_CTRL_BY_SW;
        if (cmdq->pdata->sw_ddr_en)
                gctl_regval |= GCE_DDR_EN;

        if (gctl_regval)
                writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);

        writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
        for (i = 0; i <= CMDQ_MAX_EVENT; i++)
                writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        u32 warm_reset;

        writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                        warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                        0, 10)) {
                dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                        (u32)(thread->base - cmdq->base));
                return -EFAULT;
        }

        return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
        cmdq_thread_reset(cmdq, thread);
        writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
        writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
               thread->base + CMDQ_THR_CURR_ADDR);
}

static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
        struct device *dev = task->cmdq->mbox.dev;
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *prev_task = list_last_entry(
                        &thread->task_busy_list, typeof(*task), list_entry);
        u64 *prev_task_base = prev_task->pkt->va_base;

        /* let the previous task jump to this task */
        dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
                (u64)CMDQ_JUMP_BY_PA << 32 |
                (task->pa_base >> task->cmdq->pdata->shift);
        dma_sync_single_for_device(dev, prev_task->pa_base,
                                   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

        cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
        return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
        struct cmdq_cb_data data;

        data.sta = sta;
        data.pkt = task->pkt;
        mbox_chan_received_data(task->thread->chan, &data);

        list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
        struct cmdq_thread *thread = task->thread;
        struct cmdq_task *next_task;
        struct cmdq *cmdq = task->cmdq;

        dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        next_task = list_first_entry_or_null(&thread->task_busy_list,
                                             struct cmdq_task, list_entry);
        if (next_task)
                writel(next_task->pa_base >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_CURR_ADDR);
        cmdq_thread_resume(thread);
}
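/*
 * Per-thread interrupt handling: the handler reads the thread's current PC
 * and walks task_busy_list in submission order.  Tasks whose command buffer
 * lies entirely before the PC (or whose last instruction the PC has reached)
 * are completed with status 0; on an error IRQ, the task the PC points into
 * is reported with -ENOEXEC and the thread is restarted at the next task.
 */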
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                    struct cmdq_thread *thread)
{
        struct cmdq_task *task, *tmp, *curr_task = NULL;
        u32 curr_pa, irq_flag, task_end_pa;
        bool err;

        irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
        writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

        /*
         * When the ISR calls this function, another CPU core could run
         * "release task" right before we acquire the spin lock, and thus
         * reset / disable this GCE thread, so we need to check the enable
         * bit of this GCE thread.
         */
        if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
                return;

        if (irq_flag & CMDQ_THR_IRQ_ERROR)
                err = true;
        else if (irq_flag & CMDQ_THR_IRQ_DONE)
                err = false;
        else
                return;

        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
                if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                        curr_task = task;

                if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                        cmdq_task_exec_done(task, 0);
                        kfree(task);
                } else if (err) {
                        cmdq_task_exec_done(task, -ENOEXEC);
                        cmdq_task_handle_error(curr_task);
                        kfree(task);
                }

                if (curr_task)
                        break;
        }

        if (list_empty(&thread->task_busy_list))
                cmdq_thread_disable(cmdq, thread);
}

static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
        struct cmdq *cmdq = dev;
        unsigned long irq_status, flags = 0L;
        int bit;

        irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
        if (!(irq_status ^ cmdq->irq_mask))
                return IRQ_NONE;

        for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
                struct cmdq_thread *thread = &cmdq->thread[bit];

                spin_lock_irqsave(&thread->chan->lock, flags);
                cmdq_thread_irq_handler(cmdq, thread);
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }

        pm_runtime_mark_last_busy(cmdq->mbox.dev);

        return IRQ_HANDLED;
}

static int cmdq_runtime_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_runtime_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
        return 0;
}

static int cmdq_suspend(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);
        struct cmdq_thread *thread;
        int i;
        bool task_running = false;

        cmdq->suspended = true;

        for (i = 0; i < cmdq->pdata->thread_nr; i++) {
                thread = &cmdq->thread[i];
                if (!list_empty(&thread->task_busy_list)) {
                        task_running = true;
                        break;
                }
        }

        if (task_running)
                dev_warn(dev, "exist running task(s) in suspend\n");

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
        struct cmdq *cmdq = dev_get_drvdata(dev);

        WARN_ON(pm_runtime_force_resume(dev));
        cmdq->suspended = false;

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, true);

        return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
        struct cmdq *cmdq = platform_get_drvdata(pdev);

        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);

        if (!IS_ENABLED(CONFIG_PM))
                cmdq_runtime_suspend(&pdev->dev);

        clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}
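/*
 * Mailbox client entry point: "data" is a struct cmdq_pkt whose command
 * buffer has already been mapped for DMA (pkt->pa_base).  A client would
 * typically request a channel with mbox_request_channel() and submit the
 * packet with mbox_send_message(chan, pkt); completion or failure is then
 * reported asynchronously through mbox_chan_received_data() with a
 * struct cmdq_cb_data (see cmdq_task_exec_done() above).
 */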
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;
        int ret;

        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);

        ret = pm_runtime_get_sync(cmdq->mbox.dev);
        if (ret < 0)
                return ret;

        task = kzalloc(sizeof(*task), GFP_ATOMIC);
        if (!task) {
                __pm_runtime_put_autosuspend(cmdq->mbox.dev);
                return -ENOMEM;
        }

        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
        task->pa_base = pkt->pa_base;
        task->thread = thread;
        task->pkt = pkt;

        if (list_empty(&thread->task_busy_list)) {
                /*
                 * The thread reset will clear the thread-related registers to
                 * 0, including pc, end, priority, irq, suspend and enable.
                 * Thus writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will
                 * enable the thread and set it running.
                 */
                WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

                writel(task->pa_base >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_CURR_ADDR);
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_END_ADDR);

                writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
        } else {
                WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                        cmdq->pdata->shift;
                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                        cmdq->pdata->shift;
                /* check boundary */
                if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                    curr_pa == end_pa) {
                        /* set to this task directly */
                        writel(task->pa_base >> cmdq->pdata->shift,
                               thread->base + CMDQ_THR_CURR_ADDR);
                } else {
                        cmdq_task_insert_into_thread(task);
                        smp_mb(); /* modify jump before enable thread */
                }
                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
                       thread->base + CMDQ_THR_END_ADDR);
                cmdq_thread_resume(thread);
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);

        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);

        return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
        return 0;
}

static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;

        WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

        /* make sure executed tasks get a success callback */
        cmdq_thread_irq_handler(cmdq, thread);
        if (list_empty(&thread->task_busy_list))
                goto done;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                cmdq_task_exec_done(task, -ECONNABORTED);
                kfree(task);
        }

        cmdq_thread_disable(cmdq, thread);

done:
        /*
         * An empty thread->task_busy_list means the thread is already
         * disabled.  cmdq_mbox_send_data() always resets the thread, which
         * clears the disable and suspend state, when the first packet is
         * sent to the channel, so there is nothing more to do here; just
         * unlock and leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);

        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
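/*
 * Flush aborts or drains a channel within the given timeout.  If the thread
 * is parked in a wait-for-event (WFE) instruction it cannot make progress on
 * its own, so all queued tasks are aborted with -ECONNABORTED; otherwise the
 * thread is resumed and we poll until it disables itself after executing the
 * remaining commands.
 */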
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
        struct cmdq_cb_data data;
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;
        int ret;

        ret = pm_runtime_get_sync(cmdq->mbox.dev);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto out;

        WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
        if (!cmdq_thread_is_in_wfe(thread))
                goto wait;

        list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                 list_entry) {
                data.sta = -ECONNABORTED;
                data.pkt = task->pkt;
                mbox_chan_received_data(task->thread->chan, &data);
                list_del(&task->list_entry);
                kfree(task);
        }

        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);

out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);

        return 0;

wait:
        cmdq_thread_resume(thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
        if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                                      enable, enable == 0, 1, timeout)) {
                dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
                        (u32)(thread->base - cmdq->base));

                return -EFAULT;
        }
        pm_runtime_mark_last_busy(cmdq->mbox.dev);
        __pm_runtime_put_autosuspend(cmdq->mbox.dev);
        return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
        .send_data = cmdq_mbox_send_data,
        .startup = cmdq_mbox_startup,
        .shutdown = cmdq_mbox_shutdown,
        .flush = cmdq_mbox_flush,
};

static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
                                    const struct of_phandle_args *sp)
{
        int ind = sp->args[0];
        struct cmdq_thread *thread;

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
        thread->priority = sp->args[1];
        thread->chan = &mbox->chans[ind];

        return &mbox->chans[ind];
}
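/*
 * Note: per cmdq_xlate() above, consumers reference a GCE thread with a
 * two-cell specifier: the first cell selects the thread (channel) index and
 * the second its priority.  An illustrative (not authoritative) device tree
 * entry would be something like "mboxes = <&gce 0 1>;" - see the GCE mailbox
 * DT binding for the exact format and priority constants.
 */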
static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
{
        static const char * const gce_name = "gce";
        struct device_node *node, *parent = dev->of_node->parent;
        struct clk_bulk_data *clks;

        cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
                                    sizeof(*cmdq->clocks), GFP_KERNEL);
        if (!cmdq->clocks)
                return -ENOMEM;

        if (cmdq->pdata->gce_num == 1) {
                clks = &cmdq->clocks[0];

                clks->id = gce_name;
                clks->clk = devm_clk_get(dev, NULL);
                if (IS_ERR(clks->clk))
                        return dev_err_probe(dev, PTR_ERR(clks->clk),
                                             "failed to get gce clock\n");

                return 0;
        }

        /*
         * If there is more than one GCE, get the clocks for the others too,
         * as the clock of the main GCE must be enabled for additional IPs
         * to be reachable.
         */
        for_each_child_of_node(parent, node) {
                int alias_id = of_alias_get_id(node, gce_name);

                if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
                        continue;

                clks = &cmdq->clocks[alias_id];

                clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
                if (!clks->id) {
                        of_node_put(node);
                        return -ENOMEM;
                }

                clks->clk = of_clk_get(node, 0);
                if (IS_ERR(clks->clk)) {
                        of_node_put(node);
                        return dev_err_probe(dev, PTR_ERR(clks->clk),
                                             "failed to get gce%d clock\n", alias_id);
                }
        }

        return 0;
}

static int cmdq_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct cmdq *cmdq;
        int err, i;

        cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return -ENOMEM;

        cmdq->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(cmdq->base))
                return PTR_ERR(cmdq->base);

        cmdq->irq = platform_get_irq(pdev, 0);
        if (cmdq->irq < 0)
                return cmdq->irq;

        cmdq->pdata = device_get_match_data(dev);
        if (!cmdq->pdata) {
                dev_err(dev, "failed to get match data\n");
                return -EINVAL;
        }

        cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

        dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
                dev, cmdq->base, cmdq->irq);

        err = cmdq_get_clocks(dev, cmdq);
        if (err)
                return err;

        cmdq->mbox.dev = dev;
        cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
                                        sizeof(*cmdq->mbox.chans), GFP_KERNEL);
        if (!cmdq->mbox.chans)
                return -ENOMEM;

        cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
        cmdq->mbox.ops = &cmdq_mbox_chan_ops;
        cmdq->mbox.of_xlate = cmdq_xlate;

        /* make use of TXDONE_BY_ACK */
        cmdq->mbox.txdone_irq = false;
        cmdq->mbox.txdone_poll = false;

        cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
                                    sizeof(*cmdq->thread), GFP_KERNEL);
        if (!cmdq->thread)
                return -ENOMEM;

        for (i = 0; i < cmdq->pdata->thread_nr; i++) {
                cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                                CMDQ_THR_SIZE * i;
                INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
                cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
        }

        platform_set_drvdata(pdev, cmdq);

        WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

        cmdq_init(cmdq);

        err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                               "mtk_cmdq", cmdq);
        if (err < 0) {
                dev_err(dev, "failed to register ISR (%d)\n", err);
                return err;
        }

        /* If Runtime PM is not available, enable the clocks now. */
        if (!IS_ENABLED(CONFIG_PM)) {
                err = cmdq_runtime_resume(dev);
                if (err)
                        return err;
        }

        err = devm_pm_runtime_enable(dev);
        if (err)
                return err;

        pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
        pm_runtime_use_autosuspend(dev);

        err = devm_mbox_controller_register(dev, &cmdq->mbox);
        if (err < 0) {
                dev_err(dev, "failed to register mailbox: %d\n", err);
                return err;
        }

        return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
        SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
                           cmdq_runtime_resume, NULL)
};
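/*
 * Per-SoC configuration: thread_nr is the number of GCE hardware threads
 * (mailbox channels), shift is how many bits physical addresses are
 * right-shifted before being written to the thread PC/END registers,
 * control_by_sw/sw_ddr_en select software control bits in GCE_GCTL_VALUE,
 * and gce_num is the number of GCE instances (and thus bulk clocks).
 */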
static const struct gce_plat gce_plat_mt6779 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_mt8173 = {
        .thread_nr = 16,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_mt8183 = {
        .thread_nr = 24,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
};

static const struct gce_plat gce_plat_mt8186 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .sw_ddr_en = true,
        .gce_num = 1
};

static const struct gce_plat gce_plat_mt8188 = {
        .thread_nr = 32,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 2
};

static const struct gce_plat gce_plat_mt8192 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 1
};

static const struct gce_plat gce_plat_mt8195 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
        {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
        {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
        {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
        {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
        {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
        {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
        {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
        {}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
        .remove = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
                .of_match_table = cmdq_of_ids,
        }
};

static int __init cmdq_drv_init(void)
{
        return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
        platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");