// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100

#define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)			((t)->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS		0x10
#define CMDQ_SYNC_TOKEN_UPDATE		0x68
#define CMDQ_THR_SLOT_CYCLES		0x30
#define CMDQ_THR_BASE			0x100
#define CMDQ_THR_SIZE			0x80
#define CMDQ_THR_WARM_RESET		0x00
#define CMDQ_THR_ENABLE_TASK		0x04
#define CMDQ_THR_SUSPEND_TASK		0x08
#define CMDQ_THR_CURR_STATUS		0x0c
#define CMDQ_THR_IRQ_STATUS		0x10
#define CMDQ_THR_IRQ_ENABLE		0x14
#define CMDQ_THR_CURR_ADDR		0x20
#define CMDQ_THR_END_ADDR		0x24
#define CMDQ_THR_WAIT_TOKEN		0x30
#define CMDQ_THR_PRIORITY		0x40

#define GCE_GCTL_VALUE			0x48
#define GCE_CTRL_BY_SW			GENMASK(2, 0)
#define GCE_DDR_EN			GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED		0x1
#define CMDQ_THR_DISABLED		0x0
#define CMDQ_THR_SUSPEND		0x1
#define CMDQ_THR_RESUME			0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET		BIT(0)
#define CMDQ_THR_IRQ_DONE		0x1
#define CMDQ_THR_IRQ_ERROR		0x12
#define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING		BIT(31)

#define CMDQ_JUMP_BY_OFFSET		0x10000000
#define CMDQ_JUMP_BY_PA			0x10000001

struct cmdq_thread {
	struct mbox_chan	*chan;
	void __iomem		*base;
	struct list_head	task_busy_list;
	u32			priority;
};

struct cmdq_task {
	struct cmdq		*cmdq;
	struct list_head	list_entry;
	dma_addr_t		pa_base;
	struct cmdq_thread	*thread;
	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
};

struct cmdq {
	struct mbox_controller	mbox;
	void __iomem		*base;
	int			irq;
	u32			irq_mask;
	const struct gce_plat	*pdata;
	struct cmdq_thread	*thread;
	struct clk_bulk_data	*clocks;
	bool			suspended;
};

struct gce_plat {
	u32 thread_nr;
	u8 shift;
	bool control_by_sw;
	bool sw_ddr_en;
	u32 gce_num;
};

u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

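/*
 * The GCE address registers (CMDQ_THR_CURR_ADDR, CMDQ_THR_END_ADDR) are only
 * 32 bits wide, so platforms with a non-zero pdata->shift store buffer
 * addresses pre-shifted: a PA is written as (pa >> shift) and read back as
 * (reg << shift). With shift == 3, for instance, a 35-bit DMA address still
 * fits the 32-bit register because command buffers are 8-byte aligned.
 * cmdq_get_shift_pa() exposes the shift so mailbox clients can encode buffer
 * addresses in their commands the same way.
 */
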
static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
{
	u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;

	if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
		return;

	if (cmdq->pdata->sw_ddr_en && ddr_enable)
		val |= GCE_DDR_EN;

	writel(val, cmdq->base + GCE_GCTL_VALUE);
}

static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If the thread is already disabled, treat the suspend as successful. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
				      status, status & CMDQ_THR_STATUS_SUSPENDED,
				      0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

static void cmdq_init(struct cmdq *cmdq)
{
	int i;

	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_gctl_value_toggle(cmdq, true);

	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
				      warm_reset,
				      !(warm_reset & CMDQ_THR_DO_WARM_RESET),
				      0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}

static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(&thread->task_busy_list,
						      typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let the previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 |
		(task->pa_base >> task->cmdq->pdata->shift);
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}

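/*
 * Appending a packet to a busy thread works by patching the previous
 * packet's final 8-byte instruction into an absolute CMDQ_JUMP_BY_PA that
 * targets the new packet's base address. The buffer is synced back to the
 * device after the CPU-side write, and the PC rewrite in
 * cmdq_thread_invalidate_fetched_data() makes the GCE discard anything it
 * prefetched past the patched jump.
 */
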
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
	struct cmdq_cb_data data;

	data.sta = sta;
	data.pkt = task->pkt;
	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
					     struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}

static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When the ISR calls this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset or disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list))
		cmdq_thread_disable(cmdq, thread);
}

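/*
 * Completion is inferred from the thread PC rather than from per-task
 * status: every queued task that ends before curr_pa has finished, the task
 * containing curr_pa is still running (unless the PC already sits on its
 * last instruction), and on an error IRQ the task the PC stopped in is
 * completed with -ENOEXEC while the thread is restarted at the next queued
 * task.
 */
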
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	/* the status bits are active low: a cleared bit marks a pending thread */
	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);

	return IRQ_HANDLED;
}

static int cmdq_runtime_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
	if (ret)
		return ret;

	cmdq_gctl_value_toggle(cmdq, true);
	return 0;
}

static int cmdq_runtime_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	cmdq_gctl_value_toggle(cmdq, false);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}

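/*
 * Runtime PM policy: every path that touches GCE registers holds a runtime
 * PM reference and drops it with autosuspend, so the bulk clocks are only
 * gated after CMDQ_MBOX_AUTOSUSPEND_DELAY_MS of inactivity. On parts with
 * sw_ddr_en set, suspending also clears GCE_DDR_EN through
 * cmdq_gctl_value_toggle(), presumably dropping the GCE's vote against DDR
 * low-power modes while it is idle.
 */
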
static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "tasks still running while entering suspend\n");

	return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(pm_runtime_force_resume(dev));
	cmdq->suspended = false;

	return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	if (!IS_ENABLED(CONFIG_PM))
		cmdq_runtime_suspend(&pdev->dev);

	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task;
	unsigned long curr_pa, end_pa;
	int ret;

	/* Clients should not flush new tasks while suspended. */
	WARN_ON(cmdq->suspended);

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	task = kzalloc(sizeof(*task), GFP_ATOMIC);
	if (!task) {
		__pm_runtime_put_autosuspend(cmdq->mbox.dev);
		return -ENOMEM;
	}

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		/*
		 * A thread reset clears all thread-related registers to 0,
		 * including pc, end, priority, irq, suspend and enable.
		 * Writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK therefore
		 * enables the thread and starts it running.
		 */
		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

		writel(task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
			  cmdq->pdata->shift;
		end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
			 cmdq->pdata->shift;
		/* check boundary */
		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    curr_pa == end_pa) {
			/* set PC to this task directly */
			writel(task->pa_base >> cmdq->pdata->shift,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			cmdq_task_insert_into_thread(task);
			smp_mb(); /* modify jump before enable thread */
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);
		cmdq_thread_resume(thread);
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	__pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;
}

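/*
 * A rough sketch of the client side, using only the generic mailbox API
 * (hypothetical code; most users go through the mtk-cmdq helper instead):
 *
 *	struct mbox_client cl = {
 *		.dev = dev,
 *		.rx_callback = my_rx_callback,	// receives struct cmdq_cb_data
 *		.knows_txdone = true,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *
 *	mbox_send_message(chan, pkt);	// pkt is a struct cmdq_pkt
 *	mbox_client_txdone(chan, 0);	// controller is TXDONE_BY_ACK
 *
 * send_data queues the packet to hardware immediately, so completion is
 * signalled through rx_callback (mbox_chan_received_data() above), not
 * through the TX-done machinery.
 */
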
static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}

static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure tasks that have already run get their success callback */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);

done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled. cmdq_mbox_send_data() always resets the thread, which
	 * clears the disable and suspend state, when the first packet is
	 * sent to the channel, so there is nothing else to do here; just
	 * unlock and leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	__pm_runtime_put_autosuspend(cmdq->mbox.dev);
}

static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_cb_data data;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;
	u32 enable;
	int ret;

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto out;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	if (!cmdq_thread_is_in_wfe(thread))
		goto wait;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		data.sta = -ECONNABORTED;
		data.pkt = task->pkt;
		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		kfree(task);
	}

	cmdq_thread_resume(thread);
	cmdq_thread_disable(cmdq, thread);

out:
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	__pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;

wait:
	cmdq_thread_resume(thread);
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      enable, enable == 0, 1, timeout)) {
		dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x to finish\n",
			(u32)(thread->base - cmdq->base));

		/* drop the reference taken above before bailing out */
		__pm_runtime_put_autosuspend(cmdq->mbox.dev);
		return -EFAULT;
	}
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	__pm_runtime_put_autosuspend(cmdq->mbox.dev);
	return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};

static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
				    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];
	struct cmdq_thread *thread;

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
}

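/*
 * The mbox specifier maps straight onto the hardware: the first cell picks
 * the GCE thread (channel) and the second its priority, which is written
 * verbatim to CMDQ_THR_PRIORITY. A client node would reference it as, e.g.:
 *
 *	mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>;
 *
 * with the priority constants taken from dt-bindings/gce/<soc>-gce.h.
 */
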
static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
{
	static const char * const gce_name = "gce";
	struct device_node *node, *parent = dev->of_node->parent;
	struct clk_bulk_data *clks;

	cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
				    sizeof(*cmdq->clocks), GFP_KERNEL);
	if (!cmdq->clocks)
		return -ENOMEM;

	if (cmdq->pdata->gce_num == 1) {
		clks = &cmdq->clocks[0];

		clks->id = gce_name;
		clks->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clks->clk))
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce clock\n");

		return 0;
	}

	/*
	 * If there is more than one GCE, get the clocks for the others too,
	 * as the clock of the main GCE must be enabled for additional IPs
	 * to be reachable.
	 */
	for_each_child_of_node(parent, node) {
		int alias_id = of_alias_get_id(node, gce_name);

		if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
			continue;

		clks = &cmdq->clocks[alias_id];

		clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
		if (!clks->id) {
			of_node_put(node);
			return -ENOMEM;
		}

		clks->clk = of_clk_get(node, 0);
		if (IS_ERR(clks->clk)) {
			of_node_put(node);
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce%d clock\n",
					     alias_id);
		}
	}

	return 0;
}

static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	err = cmdq_get_clocks(dev, cmdq);
	if (err)
		return err;

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
				    sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				       CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	platform_set_drvdata(pdev, cmdq);

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	/* If Runtime PM is not available, enable the clocks now. */
	if (!IS_ENABLED(CONFIG_PM)) {
		err = cmdq_runtime_resume(dev);
		if (err)
			return err;
	}

	err = devm_pm_runtime_enable(dev);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
	SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
			   cmdq_runtime_resume, NULL)
};

static const struct gce_plat gce_plat_mt6779 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8173 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8183 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8186 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.sw_ddr_en = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8188 = {
	.thread_nr = 32,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct gce_plat gce_plat_mt8192 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8195 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
	{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
	{}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};

static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");