// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100

#define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX		(2)

#define CMDQ_CURR_IRQ_STATUS		0x10
#define CMDQ_SYNC_TOKEN_UPDATE		0x68
#define CMDQ_THR_SLOT_CYCLES		0x30
#define CMDQ_THR_BASE			0x100
#define CMDQ_THR_SIZE			0x80
#define CMDQ_THR_WARM_RESET		0x00
#define CMDQ_THR_ENABLE_TASK		0x04
#define CMDQ_THR_SUSPEND_TASK		0x08
#define CMDQ_THR_CURR_STATUS		0x0c
#define CMDQ_THR_IRQ_STATUS		0x10
#define CMDQ_THR_IRQ_ENABLE		0x14
#define CMDQ_THR_CURR_ADDR		0x20
#define CMDQ_THR_END_ADDR		0x24
#define CMDQ_THR_WAIT_TOKEN		0x30
#define CMDQ_THR_PRIORITY		0x40

#define GCE_GCTL_VALUE			0x48
#define GCE_CTRL_BY_SW				GENMASK(2, 0)
#define GCE_DDR_EN				GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED		0x1
#define CMDQ_THR_DISABLED		0x0
#define CMDQ_THR_SUSPEND		0x1
#define CMDQ_THR_RESUME			0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET		BIT(0)
#define CMDQ_THR_IRQ_DONE		0x1
#define CMDQ_THR_IRQ_ERROR		0x12
#define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING		BIT(31)

#define CMDQ_JUMP_BY_OFFSET		0x10000000
#define CMDQ_JUMP_BY_PA			0x10000001

struct cmdq_thread {
	struct mbox_chan	*chan;
	void __iomem		*base;
	struct list_head	task_busy_list;
	u32			priority;
};

struct cmdq_task {
	struct cmdq		*cmdq;
	struct list_head	list_entry;
	dma_addr_t		pa_base;
	struct cmdq_thread	*thread;
	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
};

struct cmdq {
	struct mbox_controller	mbox;
	void __iomem		*base;
	int			irq;
	u32			irq_mask;
	const struct gce_plat	*pdata;
	struct cmdq_thread	*thread;
	struct clk_bulk_data	clocks[CMDQ_GCE_NUM_MAX];
	bool			suspended;
};

struct gce_plat {
	u32 thread_nr;
	u8 shift;
	bool control_by_sw;
	bool sw_ddr_en;
	u32 gce_num;
};

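/*
 * Drive the GCE_DDR_EN bits while keeping software control
 * (GCE_CTRL_BY_SW) asserted. The bulk clocks must already be prepared
 * by the caller; they are only enabled around the register write.
 */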
static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	if (enable)
		writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
	else
		writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

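/**
 * cmdq_get_shift_pa() - get the shift bits for GCE physical addresses
 * @chan: mailbox channel of the GCE
 *
 * GCE variants with a non-zero shift expect physical addresses to be
 * right-shifted by this many bits before being written to hardware;
 * mailbox clients use this helper to convert buffer addresses.
 *
 * Return: the shift bits of the underlying GCE hardware
 */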
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

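/*
 * Request a GCE thread to suspend and poll (in atomic context, for at
 * most 10 us) until the hardware reports the suspended state. A thread
 * that is already disabled counts as successfully suspended.
 */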
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If already disabled, treat the thread as successfully suspended. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

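/*
 * One-time controller setup: optionally take software control of the
 * GCE (and its DDR path), program the active slot cycles and clear all
 * event tokens by writing each event ID to CMDQ_SYNC_TOKEN_UPDATE.
 * The bulk clocks must already be prepared by the caller.
 */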
static void cmdq_init(struct cmdq *cmdq)
{
	int i;
	u32 gctl_regval = 0;

	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
	if (cmdq->pdata->control_by_sw)
		gctl_regval = GCE_CTRL_BY_SW;
	if (cmdq->pdata->sw_ddr_en)
		gctl_regval |= GCE_DDR_EN;

	if (gctl_regval)
		writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);

	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}

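/*
 * Append a task to a busy thread: patch the previous task's final
 * instruction into a "jump by physical address" to the new buffer,
 * sync the buffer back to the device, then rewrite the thread PC so
 * the GCE re-fetches the modified instruction.
 */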
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 |
		(task->pa_base >> task->cmdq->pdata->shift);
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

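/*
 * Report a task's completion status to the mailbox client and remove
 * it from the thread's busy list; freeing the task is left to the
 * caller.
 */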
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
	struct cmdq_cb_data data;

	data.sta = sta;
	data.pkt = task->pkt;
	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
}

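/*
 * Skip a faulting task: suspend the thread, point its PC at the next
 * pending task (if any) and resume. The faulting task itself has
 * already been removed from the busy list by the caller.
 */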
static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
			struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}

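/*
 * Per-thread IRQ handling, called with the channel lock held: ack the
 * thread's IRQ flags, complete every task the PC has already passed
 * (reporting -ENOEXEC for a faulting task) and disable the thread once
 * its busy list is empty.
 */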
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When the ISR calls this function, another CPU core could be
	 * running "release task" right before we acquire the spin lock,
	 * resetting and disabling this GCE thread, so we must check the
	 * enable bit of this GCE thread first.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list))
		cmdq_thread_disable(cmdq, thread);
}

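/*
 * Top-level ISR: read the global IRQ status and dispatch to each
 * pending thread. The status bits are active-low, hence the
 * for_each_clear_bit() walk.
 */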
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);

	return IRQ_HANDLED;
}

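/*
 * Runtime PM callbacks only gate the bulk clocks; the clocks are
 * prepared once in probe and unprepared in remove.
 */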
static int cmdq_runtime_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
}

static int cmdq_runtime_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}

static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "task(s) still running during suspend\n");

	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, false);

	return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(pm_runtime_force_resume(dev));
	cmdq->suspended = false;

	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, true);

	return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, false);

	if (!IS_ENABLED(CONFIG_PM))
		cmdq_runtime_suspend(&pdev->dev);

	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}

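/*
 * Queue a packet on a GCE thread. An idle thread is reset and its
 * PC/end registers pointed at the new buffer; a busy thread is
 * suspended, the packet is chained behind the last task (or the PC is
 * moved straight to it if the thread already ran off the end) and the
 * thread is resumed. The mailbox framework calls this with the channel
 * lock held.
 */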
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task;
	unsigned long curr_pa, end_pa;
	int ret;

	/* Client should not flush new tasks if suspended. */
	WARN_ON(cmdq->suspended);

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	task = kzalloc(sizeof(*task), GFP_ATOMIC);
	if (!task) {
		pm_runtime_put_autosuspend(cmdq->mbox.dev);
		return -ENOMEM;
	}

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		/*
		 * A thread reset clears the thread-related registers to 0,
		 * including pc, end, priority, irq, suspend and enable. Thus
		 * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables
		 * the thread and starts it running.
		 */
		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

		writel(task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
			cmdq->pdata->shift;
		end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
			cmdq->pdata->shift;
		/* check boundary */
		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    curr_pa == end_pa) {
			/* set to this task directly */
			writel(task->pa_base >> cmdq->pdata->shift,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			cmdq_task_insert_into_thread(task);
			smp_mb(); /* modify jump before enable thread */
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);
		cmdq_thread_resume(thread);
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}

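/*
 * Channel teardown: complete whatever the thread has already executed,
 * abort the remaining tasks with -ECONNABORTED and disable the thread.
 */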
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure executed tasks get their success callbacks */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);

done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled. cmdq_mbox_send_data() always resets the thread,
	 * clearing the disabled and suspended state, when the first
	 * packet is sent to the channel, so there is nothing to do here
	 * other than unlock and leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);

	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
}

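/*
 * Abort every queued task on the channel. If the thread is parked in a
 * wait-for-event, all tasks can be aborted immediately; otherwise the
 * thread is resumed and we poll, within the given timeout, for it to
 * drain on its own.
 */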
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_cb_data data;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;
	u32 enable;
	int ret;

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto out;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	if (!cmdq_thread_is_in_wfe(thread))
		goto wait;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		data.sta = -ECONNABORTED;
		data.pkt = task->pkt;
		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		kfree(task);
	}

	cmdq_thread_resume(thread);
	cmdq_thread_disable(cmdq, thread);

out:
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;

wait:
	cmdq_thread_resume(thread);
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      enable, enable == 0, 1, timeout)) {
		dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x to finish\n",
			(u32)(thread->base - cmdq->base));

		return -EFAULT;
	}
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
	return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};

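/*
 * The GCE uses two mailbox specifier cells: cell 0 selects the thread
 * index and cell 1 its priority, e.g.
 * mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>; with the priority macro
 * taken from the SoC's dt-bindings gce header.
 */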
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
		const struct of_phandle_args *sp)
{
	int ind = sp->args[0];
	struct cmdq_thread *thread;

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
}

static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;
	struct device_node *phandle = dev->of_node;
	struct device_node *node;
	int alias_id = 0;
	static const char * const clk_name = "gce";
	static const char * const clk_names[] = { "gce0", "gce1" };

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	if (cmdq->pdata->gce_num > 1) {
		for_each_child_of_node(phandle->parent, node) {
			alias_id = of_alias_get_id(node, clk_name);
			if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
				cmdq->clocks[alias_id].id = clk_names[alias_id];
				cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
				if (IS_ERR(cmdq->clocks[alias_id].clk)) {
					of_node_put(node);
					return dev_err_probe(dev,
							     PTR_ERR(cmdq->clocks[alias_id].clk),
							     "failed to get gce clk: %d\n",
							     alias_id);
				}
			}
		}
	} else {
		cmdq->clocks[alias_id].id = clk_name;
		cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
		if (IS_ERR(cmdq->clocks[alias_id].clk)) {
			return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
					     "failed to get gce clk\n");
		}
	}

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, cmdq);

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	/* If runtime PM is not available, enable the clocks now. */
	if (!IS_ENABLED(CONFIG_PM)) {
		err = cmdq_runtime_resume(dev);
		if (err)
			return err;
	}

	err = devm_pm_runtime_enable(dev);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
	SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
			   cmdq_runtime_resume, NULL)
};

static const struct gce_plat gce_plat_mt6779 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8173 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8183 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8186 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.sw_ddr_en = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8188 = {
	.thread_nr = 32,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct gce_plat gce_plat_mt8192 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8195 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
	{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
	{}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove_new = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};

static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");