// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Alibaba Group Holding Limited.
 */

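/*
 * Theory of operation (as implemented below):
 *
 * Every core involved in mailbox IPC owns an ICU register block. The block
 * of the CPU running Linux is mapped through the "local" region (split into
 * one 0x1000-sized window per channel), while the "remote-icuN" regions map
 * the remote cores' blocks. A transmission copies the 28-byte payload into
 * the remote INFO0 ~ INFO6 words and sets the RX_DATA bit in the remote GEN
 * register to raise its IRQ. The receiver acknowledges by writing
 * TH_1520_MBOX_ACK_MAGIC into the sender's INFO7 (and, for IRQ-driven cores,
 * setting the TX_ACK bit), which is reported as TX completion.
 */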
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Status Register */
#define TH_1520_MBOX_STA 0x0
#define TH_1520_MBOX_CLR 0x4
#define TH_1520_MBOX_MASK 0xc

/* Transmit/receive data registers: INFO0 ~ INFO6 */
#define TH_1520_MBOX_INFO_NUM 8
#define TH_1520_MBOX_DATA_INFO_NUM 7
#define TH_1520_MBOX_INFO0 0x14
/* Transmit ack register: INFO7 */
#define TH_1520_MBOX_INFO7 0x30

/* Register used to generate an IRQ at the remote ICU */
#define TH_1520_MBOX_GEN 0x10
#define TH_1520_MBOX_GEN_RX_DATA BIT(6)
#define TH_1520_MBOX_GEN_TX_ACK BIT(7)

#define TH_1520_MBOX_CHAN_RES_SIZE 0x1000
#define TH_1520_MBOX_CHANS 4
#define TH_1520_MBOX_CHAN_NAME_SIZE 20

#define TH_1520_MBOX_ACK_MAGIC 0xdeadbeaf

#ifdef CONFIG_PM_SLEEP
/* Mailbox context stored across system-wide suspend/resume transitions */
struct th1520_mbox_context {
	u32 intr_mask[TH_1520_MBOX_CHANS];
};
#endif

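/*
 * ICU/CPU indices used for channel numbering: index 0 is the CPU running
 * this driver; the remaining entries are remote cores reached through the
 * "remote-icuN" register regions.
 */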
enum th1520_mbox_icu_cpu_id {
	TH_1520_MBOX_ICU_KERNEL_CPU0, /* 910T */
	TH_1520_MBOX_ICU_CPU1, /* 902 */
	TH_1520_MBOX_ICU_CPU2, /* 906 */
	TH_1520_MBOX_ICU_CPU3, /* 910R */
};

struct th1520_mbox_con_priv {
	enum th1520_mbox_icu_cpu_id idx;
	void __iomem *comm_local_base;
	void __iomem *comm_remote_base;
	char irq_desc[TH_1520_MBOX_CHAN_NAME_SIZE];
	struct mbox_chan *chan;
};

struct th1520_mbox_priv {
	struct device *dev;
	void __iomem *local_icu[TH_1520_MBOX_CHANS];
	void __iomem *remote_icu[TH_1520_MBOX_CHANS - 1];
	void __iomem *cur_cpu_ch_base;
	spinlock_t mbox_lock; /* control register lock */

	struct mbox_controller mbox;
	struct mbox_chan mbox_chans[TH_1520_MBOX_CHANS];
	struct clk_bulk_data clocks[TH_1520_MBOX_CHANS];
	struct th1520_mbox_con_priv con_priv[TH_1520_MBOX_CHANS];
	int irq;
#ifdef CONFIG_PM_SLEEP
	struct th1520_mbox_context *ctx;
#endif
};

static struct th1520_mbox_priv *
to_th1520_mbox_priv(struct mbox_controller *mbox)
{
	return container_of(mbox, struct th1520_mbox_priv, mbox);
}

static void th1520_mbox_write(struct th1520_mbox_priv *priv, u32 val, u32 offs)
{
	iowrite32(val, priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_read(struct th1520_mbox_priv *priv, u32 offs)
{
	return ioread32(priv->cur_cpu_ch_base + offs);
}

static u32 th1520_mbox_rmw(struct th1520_mbox_priv *priv, u32 off, u32 set,
			   u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->mbox_lock, flags);
	val = th1520_mbox_read(priv, off);
	val &= ~clr;
	val |= set;
	th1520_mbox_write(priv, val, off);
	spin_unlock_irqrestore(&priv->mbox_lock, flags);

	return val;
}

static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val,
				   u32 offs, bool is_remote)
{
	if (is_remote)
		iowrite32(val, cp->comm_remote_base + offs);
	else
		iowrite32(val, cp->comm_local_base + offs);
}

static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs,
				 bool is_remote)
{
	if (is_remote)
		return ioread32(cp->comm_remote_base + offs);
	else
		return ioread32(cp->comm_local_base + offs);
}

static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off,
				 u32 set, u32 clr, bool is_remote)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->mbox_lock, flags);
	val = th1520_mbox_chan_read(cp, off, is_remote);
	val &= ~clr;
	val |= set;
	th1520_mbox_chan_write(cp, val, off, is_remote);
	spin_unlock_irqrestore(&priv->mbox_lock, flags);
}

static void th1520_mbox_chan_rd_data(struct th1520_mbox_con_priv *cp,
				     void *data, bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO0;
	u32 *arg = data;
	u32 i;

	/*
	 * Read INFO0 ~ INFO6, 28 bytes in total; the caller must provide
	 * at least 28 bytes of data memory.
	 */
	for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
		*arg = th1520_mbox_chan_read(cp, off, is_remote);
		off += 4;
		arg++;
	}
}

static void th1520_mbox_chan_wr_data(struct th1520_mbox_con_priv *cp,
				     void *data, bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO0;
	u32 *arg = data;
	u32 i;

	/*
	 * Write INFO0 ~ INFO6, 28 bytes in total; the caller must provide
	 * 28 bytes of valid data.
	 */
	for (i = 0; i < TH_1520_MBOX_DATA_INFO_NUM; i++) {
		th1520_mbox_chan_write(cp, *arg, off, is_remote);
		off += 4;
		arg++;
	}
}

static void th1520_mbox_chan_wr_ack(struct th1520_mbox_con_priv *cp, void *data,
				    bool is_remote)
{
	u32 off = TH_1520_MBOX_INFO7;
	u32 *arg = data;

	th1520_mbox_chan_write(cp, *arg, off, is_remote);
}

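/*
 * The STA/CLR/MASK registers only carry bits for the remote cores, so the
 * local CPU has no bit of its own; skip it when converting a channel index
 * into a status/mask bit position.
 */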
static int th1520_mbox_chan_id_to_mapbit(struct th1520_mbox_con_priv *cp)
{
	int mapbit = 0;
	int i;

	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		if (i == cp->idx)
			return mapbit;

		if (i != TH_1520_MBOX_ICU_KERNEL_CPU0)
			mapbit++;
	}

	if (i == TH_1520_MBOX_CHANS)
		dev_err(cp->chan->mbox->dev, "convert to mapbit failed\n");

	return 0;
}

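/*
 * A single shared IRQ serves both directions: a non-zero local INFO0 is a new
 * message from the remote core (RX path), while TH_1520_MBOX_ACK_MAGIC in the
 * local INFO7 acknowledges our last transmission (TX-done path).
 */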
static irqreturn_t th1520_mbox_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	int mapbit = th1520_mbox_chan_id_to_mapbit(cp);
	u32 sta, dat[TH_1520_MBOX_DATA_INFO_NUM];
	u32 ack_magic = TH_1520_MBOX_ACK_MAGIC;
	u32 info0_data, info7_data;

	sta = th1520_mbox_read(priv, TH_1520_MBOX_STA);
	if (!(sta & BIT(mapbit)))
		return IRQ_NONE;

	/* clear the channel's pending bit in the STA register */
	th1520_mbox_rmw(priv, TH_1520_MBOX_CLR, BIT(mapbit), 0);

	/* INFO0 is the protocol word and must not be zero for a valid message */
	info0_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO0, false);
	if (info0_data) {
		/* read the INFO0 ~ INFO6 data */
		th1520_mbox_chan_rd_data(cp, dat, false);

		/* clear local INFO0 */
		th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO0, false);

		/* notify the remote CPU */
		th1520_mbox_chan_wr_ack(cp, &ack_magic, true);
		/* the 902 and 906 cores poll INFO7, so no ack IRQ for them */
		if (cp->idx != TH_1520_MBOX_ICU_CPU1 &&
		    cp->idx != TH_1520_MBOX_ICU_CPU2)
			th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN,
					     TH_1520_MBOX_GEN_TX_ACK, 0, true);

		/* transfer the data to the client */
		mbox_chan_received_data(chan, (void *)dat);
	}

	/* the INFO7 magic value is the real ack signal, not the GEN bit7 doorbell */
	info7_data = th1520_mbox_chan_read(cp, TH_1520_MBOX_INFO7, false);
	if (info7_data == TH_1520_MBOX_ACK_MAGIC) {
		/* clear local INFO7 */
		th1520_mbox_chan_write(cp, 0x0, TH_1520_MBOX_INFO7, false);

		/* notify the framework that the last TX has completed */
		mbox_chan_txdone(chan, 0);
	}

	if (!info0_data && !info7_data)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

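/*
 * Copy the 28-byte payload into the remote core's INFO0 ~ INFO6 words and set
 * the RX_DATA bit in its GEN register to raise the remote IRQ; completion is
 * signalled later through the INFO7 ack handled in the ISR (txdone_irq).
 */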
static int th1520_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct th1520_mbox_con_priv *cp = chan->con_priv;

	th1520_mbox_chan_wr_data(cp, data, true);
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, TH_1520_MBOX_GEN_RX_DATA, 0,
			     true);
	return 0;
}

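/*
 * Channel startup: drop any stale GEN/INFO state on both the local and the
 * remote side, enable the channel's bit in the local MASK register and
 * request the shared mailbox IRQ for this channel.
 */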
static int th1520_mbox_startup(struct mbox_chan *chan)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	u32 data[8] = {};
	int mask_bit;
	int ret;

	/* clear the local and remote GEN and INFO0 ~ INFO7 registers */
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, true);
	th1520_mbox_chan_rmw(cp, TH_1520_MBOX_GEN, 0x0, 0xff, false);
	th1520_mbox_chan_wr_ack(cp, &data[7], true);
	th1520_mbox_chan_wr_ack(cp, &data[7], false);
	th1520_mbox_chan_wr_data(cp, &data[0], true);
	th1520_mbox_chan_wr_data(cp, &data[0], false);

	/* enable the channel's bit in the MASK register */
	mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
	th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, BIT(mask_bit), 0);

	/*
	 * Mixing devm_ managed resources with manual IRQ handling is generally
	 * discouraged due to potential complexities with resource management,
	 * especially when dealing with shared interrupts. However, in this case,
	 * the approach is safe and effective because:
	 *
	 * 1. Each mailbox channel requests its IRQ within the .startup() callback
	 *    and frees it within the .shutdown() callback.
	 * 2. During device unbinding, the devm_ managed mailbox controller first
	 *    iterates through all channels, ensuring that their IRQs are freed before
	 *    any other devm_ resources are released.
	 *
	 * This ordering guarantees that no interrupts can be triggered from the device
	 * while it is being unbound, preventing race conditions and ensuring system
	 * stability.
	 */
	ret = request_irq(priv->irq, th1520_mbox_isr,
			  IRQF_SHARED | IRQF_NO_SUSPEND, cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
		return ret;
	}

	return 0;
}

static void th1520_mbox_shutdown(struct mbox_chan *chan)
{
	struct th1520_mbox_priv *priv = to_th1520_mbox_priv(chan->mbox);
	struct th1520_mbox_con_priv *cp = chan->con_priv;
	int mask_bit;

	free_irq(priv->irq, chan);

	/* disable the channel's bit in the MASK register */
	mask_bit = th1520_mbox_chan_id_to_mapbit(cp);
	th1520_mbox_rmw(priv, TH_1520_MBOX_MASK, 0, BIT(mask_bit));
}

static const struct mbox_chan_ops th1520_mbox_ops = {
	.send_data = th1520_mbox_send_data,
	.startup = th1520_mbox_startup,
	.shutdown = th1520_mbox_shutdown,
};

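/*
 * Reset the local ICU to a known state: clear any pending status bits and
 * leave every channel disabled in MASK until .startup() enables it. The
 * context is only needed to save/restore the MASK settings across system
 * sleep.
 */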
static int th1520_mbox_init_generic(struct th1520_mbox_priv *priv)
{
#ifdef CONFIG_PM_SLEEP
	priv->ctx = devm_kzalloc(priv->dev, sizeof(*priv->ctx), GFP_KERNEL);
	if (!priv->ctx)
		return -ENOMEM;
#endif
	/* Set default configuration */
	th1520_mbox_write(priv, 0xff, TH_1520_MBOX_CLR);
	th1520_mbox_write(priv, 0x0, TH_1520_MBOX_MASK);
	return 0;
}

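/*
 * #mbox-cells is 1: the single cell selects the remote core to communicate
 * with; channel 0 (the local CPU) is rejected. An illustrative consumer node,
 * with made-up labels, might look like:
 *
 *	client {
 *		mboxes = <&mbox 1>;	// remote core 1 (the 902)
 *	};
 */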
static struct mbox_chan *th1520_mbox_xlate(struct mbox_controller *mbox,
					   const struct of_phandle_args *sp)
{
	u32 chan;

	if (sp->args_count != 1) {
		dev_err(mbox->dev, "Invalid argument count %d\n",
			sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	chan = sp->args[0]; /* comm remote channel */

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Unsupported channel number: %d\n", chan);
		return ERR_PTR(-EINVAL);
	}

	if (chan == TH_1520_MBOX_ICU_KERNEL_CPU0) {
		dev_err(mbox->dev, "Cannot communicate with yourself\n");
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

static void __iomem *th1520_map_mmio(struct platform_device *pdev,
				     char *res_name, size_t offset)
{
	void __iomem *mapped;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get resource: %s\n", res_name);
		return ERR_PTR(-EINVAL);
	}

	mapped = devm_ioremap(&pdev->dev, res->start + offset,
			      resource_size(res) - offset);
	if (!mapped) {
		dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
		return ERR_PTR(-ENOMEM);
	}

	return mapped;
}

static void th1520_disable_clk(void *data)
{
	struct th1520_mbox_priv *priv = data;

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}

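/*
 * Probe: get the local and the three remote ICU clocks, map the "local" and
 * "remote-icuN" register regions (note the remote-icu0 offset quirk below),
 * derive the per-channel local windows from the "local" region, and register
 * the controller with IRQ-signalled TX completion.
 */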
static int th1520_mbox_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct th1520_mbox_priv *priv;
	unsigned int remote_idx = 0;
	unsigned int i;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->clocks[0].id = "clk-local";
	priv->clocks[1].id = "clk-remote-icu0";
	priv->clocks[2].id = "clk-remote-icu1";
	priv->clocks[3].id = "clk-remote-icu2";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks),
				priv->clocks);
	if (ret) {
		dev_err(dev, "Failed to get clocks\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret) {
		dev_err(dev, "Failed to enable clocks\n");
		return ret;
	}

	/* on failure, devm_add_action_or_reset() already runs the action */
	ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
	if (ret)
		return ret;

	/*
	 * The address mappings in the device tree align precisely with those
	 * outlined in the manual. However, register offsets within these
	 * mapped regions are irregular, particularly for remote-icu0.
	 * Consequently, th1520_map_mmio() requires an additional parameter to
	 * handle this quirk.
	 */
	priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] =
		th1520_map_mmio(pdev, "local", 0x0);
	if (IS_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]))
		return PTR_ERR(priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0]);

	priv->remote_icu[0] = th1520_map_mmio(pdev, "remote-icu0", 0x4000);
	if (IS_ERR(priv->remote_icu[0]))
		return PTR_ERR(priv->remote_icu[0]);

	priv->remote_icu[1] = th1520_map_mmio(pdev, "remote-icu1", 0x0);
	if (IS_ERR(priv->remote_icu[1]))
		return PTR_ERR(priv->remote_icu[1]);

	priv->remote_icu[2] = th1520_map_mmio(pdev, "remote-icu2", 0x0);
	if (IS_ERR(priv->remote_icu[2]))
		return PTR_ERR(priv->remote_icu[2]);

	priv->local_icu[TH_1520_MBOX_ICU_CPU1] =
		priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0] +
		TH_1520_MBOX_CHAN_RES_SIZE;
	priv->local_icu[TH_1520_MBOX_ICU_CPU2] =
		priv->local_icu[TH_1520_MBOX_ICU_CPU1] +
		TH_1520_MBOX_CHAN_RES_SIZE;
	priv->local_icu[TH_1520_MBOX_ICU_CPU3] =
		priv->local_icu[TH_1520_MBOX_ICU_CPU2] +
		TH_1520_MBOX_CHAN_RES_SIZE;

	priv->cur_cpu_ch_base = priv->local_icu[TH_1520_MBOX_ICU_KERNEL_CPU0];

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* init the channels */
	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		struct th1520_mbox_con_priv *cp = &priv->con_priv[i];

		cp->idx = i;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "th1520_mbox_chan[%i]", cp->idx);

		cp->comm_local_base = priv->local_icu[i];
		if (i != TH_1520_MBOX_ICU_KERNEL_CPU0) {
			cp->comm_remote_base = priv->remote_icu[remote_idx];
			remote_idx++;
		}
	}

	spin_lock_init(&priv->mbox_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &th1520_mbox_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.num_chans = TH_1520_MBOX_CHANS;
	priv->mbox.of_xlate = th1520_mbox_xlate;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = th1520_mbox_init_generic(priv);
	if (ret) {
		dev_err(dev, "Failed to init mailbox context\n");
		return ret;
	}

	return devm_mbox_controller_register(dev, &priv->mbox);
}

static const struct of_device_id th1520_mbox_dt_ids[] = {
	{ .compatible = "thead,th1520-mbox" },
	{}
};
MODULE_DEVICE_TABLE(of, th1520_mbox_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused th1520_mbox_suspend_noirq(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	struct th1520_mbox_context *ctx = priv->ctx;
	u32 i;

	/*
	 * Only the interrupt mask bits are saved and restored;
	 * all INFO data is assumed to be lost across suspend.
	 */
	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		ctx->intr_mask[i] =
			ioread32(priv->local_icu[i] + TH_1520_MBOX_MASK);
	}
	return 0;
}

static int __maybe_unused th1520_mbox_resume_noirq(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	struct th1520_mbox_context *ctx = priv->ctx;
	u32 i;

	for (i = 0; i < TH_1520_MBOX_CHANS; i++) {
		iowrite32(ctx->intr_mask[i],
			  priv->local_icu[i] + TH_1520_MBOX_MASK);
	}

	return 0;
}
#endif

static int __maybe_unused th1520_mbox_runtime_suspend(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);

	return 0;
}

static int __maybe_unused th1520_mbox_runtime_resume(struct device *dev)
{
	struct th1520_mbox_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
	if (ret)
		dev_err(dev, "Failed to enable clocks in runtime resume\n");

	return ret;
}

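/*
 * System sleep uses the noirq phase to save/restore only the per-ICU MASK
 * registers (message payloads are assumed lost), while runtime PM simply
 * gates the bulk clocks.
 */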
static const struct dev_pm_ops th1520_mbox_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(th1520_mbox_suspend_noirq,
				      th1520_mbox_resume_noirq)
#endif
	SET_RUNTIME_PM_OPS(th1520_mbox_runtime_suspend,
			   th1520_mbox_runtime_resume, NULL)
};

static struct platform_driver th1520_mbox_driver = {
	.probe		= th1520_mbox_probe,
	.driver = {
		.name	= "th1520-mbox",
		.of_match_table = th1520_mbox_dt_ids,
		.pm = &th1520_mbox_pm_ops,
	},
};
module_platform_driver(th1520_mbox_driver);

MODULE_DESCRIPTION("T-HEAD TH1520 mailbox IPC driver");
MODULE_LICENSE("GPL");