xref: /linux/drivers/mailbox/mtk-cmdq-mailbox.c (revision 42422993cf28d456778ee9168d73758ec037cd51)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Copyright (c) 2018 MediaTek Inc.
4 
5 #include <linux/bitops.h>
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/errno.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/iopoll.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/platform_device.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/mailbox_controller.h>
18 #include <linux/mailbox/mtk-cmdq-mailbox.h>
19 #include <linux/of.h>
20 
21 #define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100
22 
23 #define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
24 #define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)
25 #define CMDQ_GCE_NUM_MAX		(2)
26 
27 #define CMDQ_CURR_IRQ_STATUS		0x10
28 #define CMDQ_SYNC_TOKEN_UPDATE		0x68
29 #define CMDQ_THR_SLOT_CYCLES		0x30
30 #define CMDQ_THR_BASE			0x100
31 #define CMDQ_THR_SIZE			0x80
32 #define CMDQ_THR_WARM_RESET		0x00
33 #define CMDQ_THR_ENABLE_TASK		0x04
34 #define CMDQ_THR_SUSPEND_TASK		0x08
35 #define CMDQ_THR_CURR_STATUS		0x0c
36 #define CMDQ_THR_IRQ_STATUS		0x10
37 #define CMDQ_THR_IRQ_ENABLE		0x14
38 #define CMDQ_THR_CURR_ADDR		0x20
39 #define CMDQ_THR_END_ADDR		0x24
40 #define CMDQ_THR_WAIT_TOKEN		0x30
41 #define CMDQ_THR_PRIORITY		0x40
42 
43 #define GCE_GCTL_VALUE			0x48
44 #define GCE_CTRL_BY_SW				GENMASK(2, 0)
45 #define GCE_DDR_EN				GENMASK(18, 16)
46 
47 #define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
48 #define CMDQ_THR_ENABLED		0x1
49 #define CMDQ_THR_DISABLED		0x0
50 #define CMDQ_THR_SUSPEND		0x1
51 #define CMDQ_THR_RESUME			0x0
52 #define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
53 #define CMDQ_THR_DO_WARM_RESET		BIT(0)
54 #define CMDQ_THR_IRQ_DONE		0x1
55 #define CMDQ_THR_IRQ_ERROR		0x12
56 #define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
57 #define CMDQ_THR_IS_WAITING		BIT(31)
58 
59 #define CMDQ_JUMP_BY_OFFSET		0x10000000
60 #define CMDQ_JUMP_BY_PA			0x10000001
61 
/* Per-hardware-thread state; one GCE thread backs one mailbox channel. */
struct cmdq_thread {
	struct mbox_chan	*chan;		/* channel bound in cmdq_xlate() */
	void __iomem		*base;		/* this thread's register window */
	struct list_head	task_busy_list;	/* submitted, not-yet-completed tasks */
	u32			priority;	/* arbitration priority from DT cell args[1] */
};
68 
/* One queued command packet, owned by the driver until completion. */
struct cmdq_task {
	struct cmdq		*cmdq;		/* owning controller */
	struct list_head	list_entry;	/* link on thread->task_busy_list */
	dma_addr_t		pa_base;	/* DMA address of the command buffer */
	struct cmdq_thread	*thread;	/* thread the task runs on */
	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
};
76 
/* Controller instance: mailbox, register base, clocks and thread array. */
struct cmdq {
	struct mbox_controller	mbox;
	void __iomem		*base;		/* GCE global register base */
	int			irq;
	u32			irq_mask;	/* one (active-low) status bit per thread */
	const struct gce_plat	*pdata;		/* per-SoC configuration */
	struct cmdq_thread	*thread;	/* array of pdata->thread_nr entries */
	struct clk_bulk_data	clocks[CMDQ_GCE_NUM_MAX];
	bool			suspended;	/* set during system suspend */
};
87 
/* Per-SoC GCE parameters, selected through cmdq_of_ids match data. */
struct gce_plat {
	u32 thread_nr;		/* number of hardware threads */
	u8 shift;		/* PA >> shift is what the pc/end registers hold */
	bool control_by_sw;	/* write GCE_CTRL_BY_SW at init */
	bool sw_ddr_en;		/* SoC needs explicit GCE_DDR_EN management */
	u32 gce_num;		/* number of GCE instances (clocks) */
};
95 
96 static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
97 {
98 	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
99 
100 	if (enable)
101 		writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
102 	else
103 		writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
104 
105 	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
106 }
107 
108 u8 cmdq_get_shift_pa(struct mbox_chan *chan)
109 {
110 	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
111 
112 	return cmdq->pdata->shift;
113 }
114 EXPORT_SYMBOL(cmdq_get_shift_pa);
115 
/*
 * Request suspension of one GCE thread and poll (up to 10us) for the
 * hardware acknowledge.  A thread that is already disabled counts as
 * suspended.  Returns 0 on success, -EFAULT on acknowledge timeout.
 */
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If already disabled, treat as suspended successful. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}
135 
/* Clear the suspend bit so the GCE thread resumes execution. */
static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}
140 
141 static void cmdq_init(struct cmdq *cmdq)
142 {
143 	int i;
144 	u32 gctl_regval = 0;
145 
146 	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
147 	if (cmdq->pdata->control_by_sw)
148 		gctl_regval = GCE_CTRL_BY_SW;
149 	if (cmdq->pdata->sw_ddr_en)
150 		gctl_regval |= GCE_DDR_EN;
151 
152 	if (gctl_regval)
153 		writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
154 
155 	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
156 	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
157 		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
158 	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
159 }
160 
/*
 * Warm-reset one GCE thread (clears its pc, end, priority, irq, suspend
 * and enable state — see the comment in cmdq_mbox_send_data()).  Polls
 * up to 10us for the self-clearing reset bit; -EFAULT on timeout.
 */
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}
176 
/* Reset the thread, then clear its enable bit to fully stop it. */
static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}
182 
/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	/* rewriting the pc with its own value flushes the prefetch queue */
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}
189 
/*
 * Chain @task behind the last task queued on its thread: patch the
 * previous packet's final instruction into a jump to @task->pa_base
 * (with proper CPU/device DMA hand-off around the write), then force
 * the GCE to re-fetch so it sees the new jump.  Called with the thread
 * suspended (see cmdq_mbox_send_data()).
 */
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	/* 64-bit instruction: opcode in the high word, shifted PA in the low */
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 |
		(task->pa_base >> task->cmdq->pdata->shift);
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}
209 
210 static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
211 {
212 	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
213 }
214 
215 static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
216 {
217 	struct cmdq_cb_data data;
218 
219 	data.sta = sta;
220 	data.pkt = task->pkt;
221 	mbox_chan_received_data(task->thread->chan, &data);
222 
223 	list_del(&task->list_entry);
224 }
225 
/*
 * Recover the thread after @task faulted: suspend it, restart the pc at
 * the first still-pending task (if any), then resume execution.
 */
static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
			struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}
241 
/*
 * Per-thread interrupt service, called under the channel spinlock.
 * Acknowledges the thread's irq flags, then walks task_busy_list in
 * submission order: tasks fully behind the current pc complete with 0;
 * on an error irq the task containing the pc completes with -ENOEXEC
 * and the thread is restarted at the next pending task.  The thread is
 * disabled once the busy list drains.
 */
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	/* ack: write back the complement of the latched flags */
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When ISR call this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	/* the pc register holds the address right-shifted by pdata->shift */
	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;	/* pc is inside this task */

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			/* task fully executed, or pc sits on its last instruction */
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list))
		cmdq_thread_disable(cmdq, thread);
}
292 
/*
 * Top-level (shared) GCE interrupt.  CMDQ_CURR_IRQ_STATUS carries one
 * active-low bit per thread (0 = irq pending), so all-bits-set under
 * the mask means nothing is ours.  Each pending thread is serviced
 * under its channel lock.
 */
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);

	return IRQ_HANDLED;
}
315 
/* Runtime resume: re-gate clocks on (they stay prepared since probe). */
static int cmdq_runtime_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
}
322 
/* Runtime suspend: gate clocks off; never fails. */
static int cmdq_runtime_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}
330 
331 static int cmdq_suspend(struct device *dev)
332 {
333 	struct cmdq *cmdq = dev_get_drvdata(dev);
334 	struct cmdq_thread *thread;
335 	int i;
336 	bool task_running = false;
337 
338 	cmdq->suspended = true;
339 
340 	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
341 		thread = &cmdq->thread[i];
342 		if (!list_empty(&thread->task_busy_list)) {
343 			task_running = true;
344 			break;
345 		}
346 	}
347 
348 	if (task_running)
349 		dev_warn(dev, "exist running task(s) in suspend\n");
350 
351 	if (cmdq->pdata->sw_ddr_en)
352 		cmdq_sw_ddr_enable(cmdq, false);
353 
354 	return pm_runtime_force_suspend(dev);
355 }
356 
/*
 * System resume: force a runtime resume, clear the suspended flag and
 * restore the SW DDR vote where applicable.
 */
static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(pm_runtime_force_resume(dev));
	cmdq->suspended = false;

	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, true);

	return 0;
}
369 
static int cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	/* drop the SW DDR vote before the clocks go away */
	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, false);

	/* without runtime PM the clocks were left enabled at probe */
	if (!IS_ENABLED(CONFIG_PM))
		cmdq_runtime_suspend(&pdev->dev);

	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}
383 
384 static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
385 {
386 	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
387 	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
388 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
389 	struct cmdq_task *task;
390 	unsigned long curr_pa, end_pa;
391 	int ret;
392 
393 	/* Client should not flush new tasks if suspended. */
394 	WARN_ON(cmdq->suspended);
395 
396 	ret = pm_runtime_get_sync(cmdq->mbox.dev);
397 	if (ret < 0)
398 		return ret;
399 
400 	task = kzalloc(sizeof(*task), GFP_ATOMIC);
401 	if (!task) {
402 		pm_runtime_put_autosuspend(cmdq->mbox.dev);
403 		return -ENOMEM;
404 	}
405 
406 	task->cmdq = cmdq;
407 	INIT_LIST_HEAD(&task->list_entry);
408 	task->pa_base = pkt->pa_base;
409 	task->thread = thread;
410 	task->pkt = pkt;
411 
412 	if (list_empty(&thread->task_busy_list)) {
413 		/*
414 		 * The thread reset will clear thread related register to 0,
415 		 * including pc, end, priority, irq, suspend and enable. Thus
416 		 * set CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will enable
417 		 * thread and make it running.
418 		 */
419 		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
420 
421 		writel(task->pa_base >> cmdq->pdata->shift,
422 		       thread->base + CMDQ_THR_CURR_ADDR);
423 		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
424 		       thread->base + CMDQ_THR_END_ADDR);
425 
426 		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
427 		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
428 		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
429 	} else {
430 		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
431 		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
432 			cmdq->pdata->shift;
433 		end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
434 			cmdq->pdata->shift;
435 		/* check boundary */
436 		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
437 		    curr_pa == end_pa) {
438 			/* set to this task directly */
439 			writel(task->pa_base >> cmdq->pdata->shift,
440 			       thread->base + CMDQ_THR_CURR_ADDR);
441 		} else {
442 			cmdq_task_insert_into_thread(task);
443 			smp_mb(); /* modify jump before enable thread */
444 		}
445 		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
446 		       thread->base + CMDQ_THR_END_ADDR);
447 		cmdq_thread_resume(thread);
448 	}
449 	list_move_tail(&task->list_entry, &thread->task_busy_list);
450 
451 	pm_runtime_mark_last_busy(cmdq->mbox.dev);
452 	pm_runtime_put_autosuspend(cmdq->mbox.dev);
453 
454 	return 0;
455 }
456 
/* Nothing to do: the thread is (re)initialized on first send. */
static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}
461 
462 static void cmdq_mbox_shutdown(struct mbox_chan *chan)
463 {
464 	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
465 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
466 	struct cmdq_task *task, *tmp;
467 	unsigned long flags;
468 
469 	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
470 
471 	spin_lock_irqsave(&thread->chan->lock, flags);
472 	if (list_empty(&thread->task_busy_list))
473 		goto done;
474 
475 	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
476 
477 	/* make sure executed tasks have success callback */
478 	cmdq_thread_irq_handler(cmdq, thread);
479 	if (list_empty(&thread->task_busy_list))
480 		goto done;
481 
482 	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
483 				 list_entry) {
484 		cmdq_task_exec_done(task, -ECONNABORTED);
485 		kfree(task);
486 	}
487 
488 	cmdq_thread_disable(cmdq, thread);
489 
490 done:
491 	/*
492 	 * The thread->task_busy_list empty means thread already disable. The
493 	 * cmdq_mbox_send_data() always reset thread which clear disable and
494 	 * suspend statue when first pkt send to channel, so there is no need
495 	 * to do any operation here, only unlock and leave.
496 	 */
497 	spin_unlock_irqrestore(&thread->chan->lock, flags);
498 
499 	pm_runtime_mark_last_busy(cmdq->mbox.dev);
500 	pm_runtime_put_autosuspend(cmdq->mbox.dev);
501 }
502 
503 static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
504 {
505 	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
506 	struct cmdq_cb_data data;
507 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
508 	struct cmdq_task *task, *tmp;
509 	unsigned long flags;
510 	u32 enable;
511 	int ret;
512 
513 	ret = pm_runtime_get_sync(cmdq->mbox.dev);
514 	if (ret < 0)
515 		return ret;
516 
517 	spin_lock_irqsave(&thread->chan->lock, flags);
518 	if (list_empty(&thread->task_busy_list))
519 		goto out;
520 
521 	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
522 	if (!cmdq_thread_is_in_wfe(thread))
523 		goto wait;
524 
525 	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
526 				 list_entry) {
527 		data.sta = -ECONNABORTED;
528 		data.pkt = task->pkt;
529 		mbox_chan_received_data(task->thread->chan, &data);
530 		list_del(&task->list_entry);
531 		kfree(task);
532 	}
533 
534 	cmdq_thread_resume(thread);
535 	cmdq_thread_disable(cmdq, thread);
536 
537 out:
538 	spin_unlock_irqrestore(&thread->chan->lock, flags);
539 	pm_runtime_mark_last_busy(cmdq->mbox.dev);
540 	pm_runtime_put_autosuspend(cmdq->mbox.dev);
541 
542 	return 0;
543 
544 wait:
545 	cmdq_thread_resume(thread);
546 	spin_unlock_irqrestore(&thread->chan->lock, flags);
547 	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
548 				      enable, enable == 0, 1, timeout)) {
549 		dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
550 			(u32)(thread->base - cmdq->base));
551 
552 		return -EFAULT;
553 	}
554 	pm_runtime_mark_last_busy(cmdq->mbox.dev);
555 	pm_runtime_put_autosuspend(cmdq->mbox.dev);
556 	return 0;
557 }
558 
/* TX completion is signalled via mbox_chan_received_data() (TXDONE_BY_ACK). */
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};
565 
566 static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
567 		const struct of_phandle_args *sp)
568 {
569 	int ind = sp->args[0];
570 	struct cmdq_thread *thread;
571 
572 	if (ind >= mbox->num_chans)
573 		return ERR_PTR(-EINVAL);
574 
575 	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
576 	thread->priority = sp->args[1];
577 	thread->chan = &mbox->chans[ind];
578 
579 	return &mbox->chans[ind];
580 }
581 
/*
 * Probe: map registers, resolve the GCE clock(s) — via "gce" DT aliases
 * when gce_num > 1 — register one mailbox channel per hardware thread,
 * initialize the GCE, request the shared interrupt and set up
 * autosuspend runtime PM.
 */
static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;
	struct device_node *phandle = dev->of_node;
	struct device_node *node;
	int alias_id = 0;
	static const char * const clk_name = "gce";
	static const char * const clk_names[] = { "gce0", "gce1" };

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	/* one status bit per hardware thread */
	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	if (cmdq->pdata->gce_num > 1) {
		/* multi-GCE: find each sibling node's clock by its alias id */
		for_each_child_of_node(phandle->parent, node) {
			alias_id = of_alias_get_id(node, clk_name);
			if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
				cmdq->clocks[alias_id].id = clk_names[alias_id];
				cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
				if (IS_ERR(cmdq->clocks[alias_id].clk)) {
					of_node_put(node);
					return dev_err_probe(dev,
							     PTR_ERR(cmdq->clocks[alias_id].clk),
							     "failed to get gce clk: %d\n",
							     alias_id);
				}
			}
		}
	} else {
		cmdq->clocks[alias_id].id = clk_name;
		cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
		if (IS_ERR(cmdq->clocks[alias_id].clk)) {
			return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
					     "failed to get gce clk\n");
		}
	}

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	/* each thread owns a CMDQ_THR_SIZE window starting at CMDQ_THR_BASE */
	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, cmdq);

	/* clocks stay prepared until cmdq_remove(); enable is per-use */
	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	/* If Runtime PM is not available enable the clocks now. */
	if (!IS_ENABLED(CONFIG_PM)) {
		err = cmdq_runtime_resume(dev);
		if (err)
			return err;
	}

	err = devm_pm_runtime_enable(dev);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	return 0;
}
701 
/* System sleep uses cmdq_suspend/resume; runtime PM just gates clocks. */
static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
	SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
			   cmdq_runtime_resume, NULL)
};
708 
/* Per-SoC configurations; see cmdq_of_ids for the compatible mapping. */

/* mt8173 */
static const struct gce_plat gce_plat_v2 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

/* mt8183 */
static const struct gce_plat gce_plat_v3 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

/* mt6779 */
static const struct gce_plat gce_plat_v4 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

/* mt8192 */
static const struct gce_plat gce_plat_v5 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 1
};

/* mt8195: two GCE instances */
static const struct gce_plat gce_plat_v6 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

/* mt8186: needs explicit SW DDR enable handling */
static const struct gce_plat gce_plat_v7 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.sw_ddr_en = true,
	.gce_num = 1
};
751 
/* OF match table; .data picks the gce_plat variant for the SoC. */
static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
	{}
};
761 
/* Platform driver glue. */
static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};
771 
/* Registered at subsys_initcall (earlier than module_init) time. */
static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");
786