// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100

#define CMDQ_OP_CODE_MASK	(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)		(t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS	0x10
#define CMDQ_SYNC_TOKEN_UPDATE	0x68
#define CMDQ_THR_SLOT_CYCLES	0x30
#define CMDQ_THR_BASE		0x100
#define CMDQ_THR_SIZE		0x80
#define CMDQ_THR_WARM_RESET	0x00
#define CMDQ_THR_ENABLE_TASK	0x04
#define CMDQ_THR_SUSPEND_TASK	0x08
#define CMDQ_THR_CURR_STATUS	0x0c
#define CMDQ_THR_IRQ_STATUS	0x10
#define CMDQ_THR_IRQ_ENABLE	0x14
#define CMDQ_THR_CURR_ADDR	0x20
#define CMDQ_THR_END_ADDR	0x24
#define CMDQ_THR_WAIT_TOKEN	0x30
#define CMDQ_THR_PRIORITY	0x40

#define GCE_GCTL_VALUE		0x48
#define GCE_CTRL_BY_SW		GENMASK(2, 0)
#define GCE_DDR_EN		GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED	0x1
#define CMDQ_THR_DISABLED	0x0
#define CMDQ_THR_SUSPEND	0x1
#define CMDQ_THR_RESUME		0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET	BIT(0)
#define CMDQ_THR_IRQ_DONE	0x1
#define CMDQ_THR_IRQ_ERROR	0x12
#define CMDQ_THR_IRQ_EN		(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING	BIT(31)

#define CMDQ_JUMP_BY_OFFSET	0x10000000
#define CMDQ_JUMP_BY_PA		0x10000001

struct cmdq_thread {
	struct mbox_chan *chan;
	void __iomem *base;
	struct list_head task_busy_list;
	u32 priority;
};

struct cmdq_task {
	struct cmdq *cmdq;
	struct list_head list_entry;
	dma_addr_t pa_base;
	struct cmdq_thread *thread;
	struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
};

struct cmdq {
	struct mbox_controller mbox;
	void __iomem *base;
	int irq;
	u32 irq_mask;
	const struct gce_plat *pdata;
	struct cmdq_thread *thread;
	struct clk_bulk_data *clocks;
	bool suspended;
};

struct gce_plat {
	u32 thread_nr;
	u8 shift;
	bool control_by_sw;
	bool sw_ddr_en;
	u32 gce_num;
};

static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
{
	/* Convert DMA addr (PA or IOVA) to GCE readable addr */
	return addr >> pdata->shift;
}

static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
{
	/* Revert GCE readable addr to DMA addr (PA or IOVA) */
	return (dma_addr_t)addr << pdata->shift;
}

u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

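/*
 * Worked example of the helpers above: with pdata->shift == 3, a DMA
 * address of 0x1_2345_6780 is programmed into GCE registers as
 * 0x2468_ACF0 (addr >> 3) and recovered as 0x1_2345_6780 (val << 3)
 * when read back, so 35-bit addresses fit in 32-bit registers.
 */

/*
 * Program the GCE general control register: set the GCE_CTRL_BY_SW bits
 * when the platform relies on software control, and additionally set
 * GCE_DDR_EN when a sw_ddr_en platform wants the DDR path kept enabled
 * while the GCE is active.
 */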
static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
{
	u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;

	if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
		return;

	if (cmdq->pdata->sw_ddr_en && ddr_enable)
		val |= GCE_DDR_EN;

	writel(val, cmdq->base + GCE_GCTL_VALUE);
}

static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If the thread is already disabled, treat it as successfully suspended. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

static void cmdq_init(struct cmdq *cmdq)
{
	int i;

	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_gctl_value_toggle(cmdq, true);

	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}

static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;
	u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);

	/* let the previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

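/*
 * Report a completed (or aborted) packet back to the mailbox client via
 * mbox_chan_received_data() and unlink the task from the thread's busy
 * list; the caller frees the task afterwards.
 */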
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
	struct cmdq_cb_data data;

	data.sta = sta;
	data.pkt = task->pkt;
	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
}

static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
					     struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}

static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 irq_flag, gce_addr;
	dma_addr_t curr_pa, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When the ISR calls this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
	curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list))
		cmdq_thread_disable(cmdq, thread);
}

static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	/* the IRQ status bits are active low: a cleared bit marks a pending thread */
	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);

	return IRQ_HANDLED;
}

static int cmdq_runtime_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
	if (ret)
		return ret;

	cmdq_gctl_value_toggle(cmdq, true);
	return 0;
}

static int cmdq_runtime_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	cmdq_gctl_value_toggle(cmdq, false);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}

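/*
 * System sleep: flag the controller as suspended so new submissions are
 * caught by the WARN_ON() in cmdq_mbox_send_data(), warn if any GCE
 * thread still has queued tasks, then force the device into runtime
 * suspend.
 */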
static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "exist running task(s) in suspend\n");

	return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(pm_runtime_force_resume(dev));
	cmdq->suspended = false;

	return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	if (!IS_ENABLED(CONFIG_PM))
		cmdq_runtime_suspend(&pdev->dev);

	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task;
	u32 gce_addr;
	dma_addr_t curr_pa, end_pa;

	/* Clients should not flush new tasks while suspended. */
	WARN_ON(cmdq->suspended);

	task = kzalloc(sizeof(*task), GFP_ATOMIC);
	if (!task)
		return -ENOMEM;

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		/*
		 * The thread reset clears the thread related registers to 0,
		 * including pc, end, priority, irq, suspend and enable. Thus,
		 * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables
		 * the thread and starts it running.
		 */
		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

		gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
		writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
		gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
		writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
		gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
		curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
		gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
		end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
		/* check boundary */
		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    curr_pa == end_pa) {
			/* set to this task directly */
			writel(task->pa_base >> cmdq->pdata->shift,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			cmdq_task_insert_into_thread(task);
			smp_mb(); /* modify jump before enable thread */
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);
		cmdq_thread_resume(thread);
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);

	return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}

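/*
 * Client-side view, as a minimal sketch assuming the cmdq helper API in
 * include/linux/soc/mediatek/mtk-cmdq.h (exact signatures vary between
 * kernel versions; "subsys", "offset" and "value" are placeholders):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write(pkt, subsys, offset, value);
 *	cmdq_pkt_finalize(pkt);		(appends EOC + JUMP)
 *
 * and then flush the packet through the mailbox framework. Each flushed
 * packet reaches this driver via cmdq_mbox_send_data() above and is
 * completed from cmdq_thread_irq_handler().
 */
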
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure already-executed tasks get their success callback */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);

done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled. cmdq_mbox_send_data() always resets the thread, which
	 * clears the disable and suspend state, when the first packet is
	 * sent to the channel, so there is nothing more to do here; just
	 * unlock and leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
}

static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_cb_data data;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;
	u32 enable;
	int ret;

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto out;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	if (!cmdq_thread_is_in_wfe(thread))
		goto wait;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		data.sta = -ECONNABORTED;
		data.pkt = task->pkt;
		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		kfree(task);
	}

	cmdq_thread_resume(thread);
	cmdq_thread_disable(cmdq, thread);

out:
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;

wait:
	cmdq_thread_resume(thread);
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      enable, enable == 0, 1, timeout)) {
		dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
			(u32)(thread->base - cmdq->base));

		return -EFAULT;
	}
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
	return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};

static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
				    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];
	struct cmdq_thread *thread;

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
}

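/*
 * Look up the GCE clocks: a single-GCE SoC simply takes the device's own
 * clock, while multi-GCE SoCs scan the parent node for children carrying
 * a "gce" alias and collect one clock per instance.
 */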
static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
{
	static const char * const gce_name = "gce";
	struct device_node *node, *parent = dev->of_node->parent;
	struct clk_bulk_data *clks;

	cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
				    sizeof(*cmdq->clocks), GFP_KERNEL);
	if (!cmdq->clocks)
		return -ENOMEM;

	if (cmdq->pdata->gce_num == 1) {
		clks = &cmdq->clocks[0];

		clks->id = gce_name;
		clks->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clks->clk))
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce clock\n");

		return 0;
	}

	/*
	 * If there is more than one GCE, get the clocks for the others too,
	 * as the clock of the main GCE must be enabled for additional IPs
	 * to be reachable.
	 */
	for_each_child_of_node(parent, node) {
		int alias_id = of_alias_get_id(node, gce_name);

		if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
			continue;

		clks = &cmdq->clocks[alias_id];

		clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
		if (!clks->id) {
			of_node_put(node);
			return -ENOMEM;
		}

		clks->clk = of_clk_get(node, 0);
		if (IS_ERR(clks->clk)) {
			of_node_put(node);
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce%d clock\n", alias_id);
		}
	}

	return 0;
}

static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	err = cmdq_get_clocks(dev, cmdq);
	if (err)
		return err;

	dma_set_coherent_mask(dev,
			      DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
				    sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				       CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	platform_set_drvdata(pdev, cmdq);

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

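	/*
	 * From here on, clock management is handed to runtime PM with a
	 * short autosuspend delay; the mailbox controller is registered
	 * last so channels only become usable once the hardware is ready.
	 */
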
	/* If Runtime PM is not available, enable the clocks now. */
	if (!IS_ENABLED(CONFIG_PM)) {
		err = cmdq_runtime_resume(dev);
		if (err)
			return err;
	}

	err = devm_pm_runtime_enable(dev);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
	SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
			   cmdq_runtime_resume, NULL)
};

static const struct gce_plat gce_plat_mt6779 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8173 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8183 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8186 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.sw_ddr_en = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8188 = {
	.thread_nr = 32,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct gce_plat gce_plat_mt8192 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8195 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
	{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
	{}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};

static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");