xref: /linux/drivers/mailbox/ti-msgmgr.c (revision 80d443e8876602be2c130f79c4de81e12e2a700d)
1 /*
2  * Texas Instruments' Message Manager Driver
3  *
4  * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
5  *	Nishanth Menon
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12  * kind, whether express or implied; without even the implied warranty
13  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  */
16 
17 #define pr_fmt(fmt) "%s: " fmt, __func__
18 
19 #include <linux/device.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/kernel.h>
23 #include <linux/mailbox_controller.h>
24 #include <linux/module.h>
25 #include <linux/of_device.h>
26 #include <linux/of.h>
27 #include <linux/of_irq.h>
28 #include <linux/platform_device.h>
29 #include <linux/soc/ti/ti-msgmgr.h>
30 
31 #define Q_DATA_OFFSET(proxy, queue, reg)	\
32 		     ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
33 #define Q_STATE_OFFSET(queue)			((queue) * 0x4)
34 #define Q_STATE_ENTRY_COUNT_MASK		(0xFFF000)
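
/*
 * Worked example, using the K2G integration data declared further below
 * (data_first_reg = 16, data_last_reg = 31): for proxy 0, queue 5,
 * Q_DATA_OFFSET(0, 5, 16) = 0x2C0 and Q_DATA_OFFSET(0, 5, 31) = 0x2FC,
 * i.e. 16 registers of 4 bytes = 64 bytes of message buffer, while
 * Q_STATE_OFFSET(5) = 0x14 within the status region.
 */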
35 
36 /**
37  * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
38  * @queue_id:	Queue Number for this path
39  * @proxy_id:	Proxy ID representing the processor in SoC
40  * @is_tx:	Is this a receive path?
41  */
42 struct ti_msgmgr_valid_queue_desc {
43 	u8 queue_id;
44 	u8 proxy_id;
45 	bool is_tx;
46 };
47 
48 /**
49  * struct ti_msgmgr_desc - Description of message manager integration
50  * @queue_count:	Number of Queues
51  * @max_message_size:	Maximum message size in bytes
52  * @max_messages:	Maximum number of messages a queue can hold
53  * @q_slices:		Number of queue engines
54  * @q_proxies:		Number of queue proxies per page
55  * @data_first_reg:	First data register for proxy data region
56  * @data_last_reg:	Last data register for proxy data region
57  * @tx_polled:		Whether the tx path needs to use the polled mechanism
58  * @tx_poll_timeout_ms: Timeout in ms if polled
59  * @valid_queues:	List of Valid queues that the processor can access
60  * @num_valid_queues:	Number of valid queues
61  *
62  * This structure is used in the OF match data to describe how the integration
63  * for a specific compatible SoC is done.
64  */
65 struct ti_msgmgr_desc {
66 	u8 queue_count;
67 	u8 max_message_size;
68 	u8 max_messages;
69 	u8 q_slices;
70 	u8 q_proxies;
71 	u8 data_first_reg;
72 	u8 data_last_reg;
73 	bool tx_polled;
74 	int tx_poll_timeout_ms;
75 	const struct ti_msgmgr_valid_queue_desc *valid_queues;
76 	int num_valid_queues;
77 };
78 
79 /**
80  * struct ti_queue_inst - Description of a queue instance
81  * @name:	Queue Name
82  * @queue_id:	Queue Identifier as mapped on SoC
83  * @proxy_id:	Proxy Identifier as mapped on SoC
84  * @irq:	IRQ for Rx Queue
85  * @is_tx:	'true' if transmit queue, else 'false'
86  * @queue_buff_start: First register of Data Buffer
87  * @queue_buff_end: Last (or confirmation) register of Data buffer
88  * @queue_state: Queue status register
89  * @chan:	Mailbox channel
90  * @rx_buff:	Receive buffer pointer allocated at probe, max_message_size bytes long
91  */
92 struct ti_queue_inst {
93 	char name[30];
94 	u8 queue_id;
95 	u8 proxy_id;
96 	int irq;
97 	bool is_tx;
98 	void __iomem *queue_buff_start;
99 	void __iomem *queue_buff_end;
100 	void __iomem *queue_state;
101 	struct mbox_chan *chan;
102 	u32 *rx_buff;
103 };
104 
105 /**
106  * struct ti_msgmgr_inst - Description of a Message Manager Instance
107  * @dev:	device pointer corresponding to the Message Manager instance
108  * @desc:	Description of the SoC integration
109  * @queue_proxy_region:	Queue proxy region where queue buffers are located
110  * @queue_state_debug_region:	Queue status register region
111  * @num_valid_queues:	Number of valid queues defined for the processor
112  *		Note: other queues are probably reserved for other processors
113  *		in the SoC.
114  * @qinsts:	Array of valid Queue Instances for the Processor
115  * @mbox:	Mailbox Controller
116  * @chans:	Array for channels corresponding to the Queue Instances.
117  */
118 struct ti_msgmgr_inst {
119 	struct device *dev;
120 	const struct ti_msgmgr_desc *desc;
121 	void __iomem *queue_proxy_region;
122 	void __iomem *queue_state_debug_region;
123 	u8 num_valid_queues;
124 	struct ti_queue_inst *qinsts;
125 	struct mbox_controller mbox;
126 	struct mbox_chan *chans;
127 };
128 
129 /**
130  * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
131  * @qinst:	Queue instance for which we check the number of pending messages
132  *
133  * Return: number of messages pending in the queue (0 == no pending messages)
134  */
135 static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst)
136 {
137 	u32 val;
138 
139 	/*
140 	 * We cannot use relaxed operation here - update may happen
141 	 * real-time.
142 	 */
143 	val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK;
144 	val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK);
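	/* e.g. a raw state value of 0x003000 reads back as 3 pending messages */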
145 
146 	return val;
147 }
148 
149 /**
150  * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
151  * @irq:	Interrupt number
152  * @p:		Channel Pointer
153  *
154  * Return: -EINVAL if there is no instance
155  * IRQ_NONE if the interrupt is not ours.
156  * IRQ_HANDLED if the rx interrupt was successfully handled.
157  */
158 static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
159 {
160 	struct mbox_chan *chan = p;
161 	struct device *dev = chan->mbox->dev;
162 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
163 	struct ti_queue_inst *qinst = chan->con_priv;
164 	const struct ti_msgmgr_desc *desc;
165 	int msg_count, num_words;
166 	struct ti_msgmgr_message message;
167 	void __iomem *data_reg;
168 	u32 *word_data;
169 
170 	if (WARN_ON(!inst)) {
171 		dev_err(dev, "no platform drv data??\n");
172 		return -EINVAL;
173 	}
174 
175 	/* Do I have an invalid interrupt source? */
176 	if (qinst->is_tx) {
177 		dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
178 			qinst->name);
179 		return IRQ_NONE;
180 	}
181 
182 	/* Do I actually have messages to read? */
183 	msg_count = ti_msgmgr_queue_get_num_messages(qinst);
184 	if (!msg_count) {
185 		/* Shared IRQ? */
186 		dev_dbg(dev, "Spurious event - 0 pending data!\n");
187 		return IRQ_NONE;
188 	}
189 
190 	/*
191 	 * I have no idea about the protocol being used to communicate with the
192	 * remote producer - 0 could be valid data, so I won't make a judgement
193 	 * of how many bytes I should be reading. Let the client figure this
194 	 * out.. I just read the full message and pass it on..
195 	 */
196 	desc = inst->desc;
197 	message.len = desc->max_message_size;
198 	message.buf = (u8 *)qinst->rx_buff;
199 
200 	/*
201 	 * NOTE about register access involved here:
202 	 * the hardware block is implemented with 32bit access operations and no
203 	 * support for data splitting.  We don't want the hardware to misbehave
204 	 * with sub 32bit access - For example: if the last register read is
205 	 * split into byte wise access, it can result in the queue getting
206 	 * stuck or indeterminate behavior. An out of order read operation may
207 	 * result in weird data results as well.
208 	 * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
209 	 * we depend on readl for the purpose.
210 	 *
211 	 * Also note that the final register read automatically marks the
212 	 * queue message as read.
213 	 */
214 	for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
215 	     num_words = (desc->max_message_size / sizeof(u32));
216 	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
217 		*word_data = readl(data_reg);
218 
219 	/*
220 	 * Last register read automatically clears the IRQ if only 1 message
221 	 * is pending - so send the data up the stack..
222 	 * NOTE: Client is expected to be as optimal as possible, since
223 	 * we invoke the handler in IRQ context.
224 	 */
225 	mbox_chan_received_data(chan, (void *)&message);
226 
227 	return IRQ_HANDLED;
228 }
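
/*
 * Illustrative client-side sketch (not part of this driver): the pointer
 * handed to mbox_chan_received_data() above arrives in the client's
 * rx_callback, roughly:
 *
 *	static void example_rx_callback(struct mbox_client *cl, void *data)
 *	{
 *		struct ti_msgmgr_message *msg = data;
 *
 *		// msg->buf holds max_message_size bytes; the protocol on top
 *		// decides how many of them are meaningful
 *	}
 *
 * 'example_rx_callback' is a placeholder name registered through the
 * rx_callback member of struct mbox_client.
 */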
229 
230 /**
231  * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
232  * @chan:	Channel Pointer
233  *
234  * Return: 'true' if there is pending rx data, 'false' if there is none.
235  */
236 static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
237 {
238 	struct ti_queue_inst *qinst = chan->con_priv;
239 	int msg_count;
240 
241 	if (qinst->is_tx)
242 		return false;
243 
244 	msg_count = ti_msgmgr_queue_get_num_messages(qinst);
245 
246 	return msg_count ? true : false;
247 }
248 
249 /**
250  * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
251  * @chan:	Channel pointer
252  *
253  * Return: 'true' if no pending tx data, 'false' if there are any.
254  */
255 static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
256 {
257 	struct ti_queue_inst *qinst = chan->con_priv;
258 	int msg_count;
259 
260 	if (!qinst->is_tx)
261 		return false;
262 
263 	msg_count = ti_msgmgr_queue_get_num_messages(qinst);
264 
265	/* if any messages are still pending, tx is not yet done */
266 	return msg_count ? false : true;
267 }
268 
269 /**
270  * ti_msgmgr_send_data() - Send data
271  * @chan:	Channel Pointer
272  * @data:	Pointer to the ti_msgmgr_message to transmit
273  *
274  * Return: 0 on success, else an appropriate error value.
275  */
276 static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
277 {
278 	struct device *dev = chan->mbox->dev;
279 	struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
280 	const struct ti_msgmgr_desc *desc;
281 	struct ti_queue_inst *qinst = chan->con_priv;
282 	int num_words, trail_bytes;
283 	struct ti_msgmgr_message *message = data;
284 	void __iomem *data_reg;
285 	u32 *word_data;
286 
287 	if (WARN_ON(!inst)) {
288 		dev_err(dev, "no platform drv data??\n");
289 		return -EINVAL;
290 	}
291 	desc = inst->desc;
292 
293 	if (desc->max_message_size < message->len) {
294 		dev_err(dev, "Queue %s message length %d > max %d\n",
295 			qinst->name, message->len, desc->max_message_size);
296 		return -EINVAL;
297 	}
298 
299	/* NOTE: Constraints similar to the rx path exist here as well */
300 	for (data_reg = qinst->queue_buff_start,
301 	     num_words = message->len / sizeof(u32),
302 	     word_data = (u32 *)message->buf;
303 	     num_words; num_words--, data_reg += sizeof(u32), word_data++)
304 		writel(*word_data, data_reg);
305 
306 	trail_bytes = message->len % sizeof(u32);
307 	if (trail_bytes) {
308 		u32 data_trail = *word_data;
309 
310 		/* Ensure all unused data is 0 */
311 		data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
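		/* e.g. trail_bytes == 2 -> mask 0x0000FFFF; the 2 unused upper bytes are zeroed */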
312 		writel(data_trail, data_reg);
313 		data_reg++;
314 	}
315 	/*
316	 * 'data_reg' indicates the next register to write. If we did not already
317	 * write to the tx complete reg (last reg), we must do so to trigger transmit
318 	 */
319 	if (data_reg <= qinst->queue_buff_end)
320 		writel(0, qinst->queue_buff_end);
321 
322 	return 0;
323 }
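
/*
 * Illustrative client-side sketch (not part of this driver): a mailbox
 * client hands data to the path above by wrapping it in a struct
 * ti_msgmgr_message, roughly:
 *
 *	struct ti_msgmgr_message msg = {
 *		.buf = (u8 *)payload,		// hypothetical payload buffer
 *		.len = payload_len,		// must not exceed max_message_size
 *	};
 *	ret = mbox_send_message(chan, &msg);
 *
 * 'payload', 'payload_len' and 'chan' are placeholders; 'chan' would come
 * from mbox_request_channel() on the client side.
 */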
324 
325 /**
326  * ti_msgmgr_queue_startup() - Startup queue
327  * @chan:	Channel pointer
328  *
329  * Return: 0 on success, else a corresponding error value
330  */
331 static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
332 {
333 	struct ti_queue_inst *qinst = chan->con_priv;
334 	struct device *dev = chan->mbox->dev;
335 	int ret;
336 
337 	if (!qinst->is_tx) {
338 		/*
339		 * With the expectation that the IRQ might be shared within the SoC
340 		 */
341 		ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
342 				  IRQF_SHARED, qinst->name, chan);
343 		if (ret) {
344 			dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
345 				qinst->irq, qinst->name, ret);
346 			return ret;
347 		}
348 	}
349 
350 	return 0;
351 }
352 
353 /**
354  * ti_msgmgr_queue_shutdown() - Shutdown the queue
355  * @chan:	Channel pointer
356  */
357 static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
358 {
359 	struct ti_queue_inst *qinst = chan->con_priv;
360 
361 	if (!qinst->is_tx)
362 		free_irq(qinst->irq, chan);
363 }
364 
365 /**
366  * ti_msgmgr_of_xlate() - Translation of phandle to queue
367  * @mbox:	Mailbox controller
368  * @p:		phandle pointer
369  *
370  * Return: Mailbox channel corresponding to the queue, else return error
371  * pointer.
372  */
373 static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
374 					    const struct of_phandle_args *p)
375 {
376 	struct ti_msgmgr_inst *inst;
377 	int req_qid, req_pid;
378 	struct ti_queue_inst *qinst;
379 	int i;
380 
381 	inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
382 	if (WARN_ON(!inst))
383 		return ERR_PTR(-EINVAL);
384 
385 	/* #mbox-cells is 2 */
386 	if (p->args_count != 2) {
387 		dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n",
388 			p->args_count);
389 		return ERR_PTR(-EINVAL);
390 	}
391 	req_qid = p->args[0];
392 	req_pid = p->args[1];
393 
394 	for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
395 	     i++, qinst++) {
396 		if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
397 			return qinst->chan;
398 	}
399 
400 	dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
401 		req_qid, req_pid, p->np->name);
402 	return ERR_PTR(-ENOENT);
403 }
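
/*
 * Illustrative devicetree usage (node and label names are placeholders;
 * the ti,k2g-message-manager binding document is authoritative):
 *
 *	msgmgr: msgmgr {
 *		compatible = "ti,k2g-message-manager";
 *		#mbox-cells = <2>;
 *	};
 *
 *	client {
 *		// cell 0 is the queue ID, cell 1 the proxy ID; e.g. K2G
 *		// tx queue 0/proxy 0 and rx queue 5/proxy 2 from the table below
 *		mboxes = <&msgmgr 0 0>, <&msgmgr 5 2>;
 *	};
 */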
404 
405 /**
406  * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
407  * @idx:	index of the queue
408  * @dev:	pointer to the message manager device
409  * @np:		pointer to the of node
410  * @inst:	Message Manager instance pointer
411  * @d:		Message Manager instance description data
412  * @qd:		Queue description data
413  * @qinst:	Queue instance pointer
414  * @chan:	pointer to mailbox channel
415  *
416  * Return: 0 if all went well, else return corresponding error
417  */
418 static int ti_msgmgr_queue_setup(int idx, struct device *dev,
419 				 struct device_node *np,
420 				 struct ti_msgmgr_inst *inst,
421 				 const struct ti_msgmgr_desc *d,
422 				 const struct ti_msgmgr_valid_queue_desc *qd,
423 				 struct ti_queue_inst *qinst,
424 				 struct mbox_chan *chan)
425 {
426 	qinst->proxy_id = qd->proxy_id;
427 	qinst->queue_id = qd->queue_id;
428 
429 	if (qinst->queue_id > d->queue_count) {
430		dev_err(dev, "Queue Data [idx=%d] queue_id %d > %d\n",
431 			idx, qinst->queue_id, d->queue_count);
432 		return -ERANGE;
433 	}
434 
435 	qinst->is_tx = qd->is_tx;
436 	snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
437 		 dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
438 		 qinst->proxy_id);
439 
440 	if (!qinst->is_tx) {
441 		char of_rx_irq_name[7];
442 
443 		snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
444 			 "rx_%03d", qinst->queue_id);
445 
446 		qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
447 		if (qinst->irq < 0) {
448 			dev_crit(dev,
449 				 "[%d]QID %d PID %d:No IRQ[%s]: %d\n",
450 				 idx, qinst->queue_id, qinst->proxy_id,
451 				 of_rx_irq_name, qinst->irq);
452 			return qinst->irq;
453 		}
454		/* Allocate rx buffer of max_message_size bytes */
455 		qinst->rx_buff = devm_kzalloc(dev,
456 					      d->max_message_size, GFP_KERNEL);
457 		if (!qinst->rx_buff)
458 			return -ENOMEM;
459 	}
460 
461 	qinst->queue_buff_start = inst->queue_proxy_region +
462 	    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
463 	qinst->queue_buff_end = inst->queue_proxy_region +
464 	    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
465 	qinst->queue_state = inst->queue_state_debug_region +
466 	    Q_STATE_OFFSET(qinst->queue_id);
467 	qinst->chan = chan;
468 
469 	chan->con_priv = qinst;
470 
471 	dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
472 		idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
473 		qinst->queue_buff_start, qinst->queue_buff_end);
474 	return 0;
475 }
476 
477 /* Queue operations */
478 static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
479 	.startup = ti_msgmgr_queue_startup,
480 	.shutdown = ti_msgmgr_queue_shutdown,
481 	.peek_data = ti_msgmgr_queue_peek_data,
482 	.last_tx_done = ti_msgmgr_last_tx_done,
483 	.send_data = ti_msgmgr_send_data,
484 };
485 
486 /* Keystone K2G SoC integration details */
487 static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
488 	{.queue_id = 0, .proxy_id = 0, .is_tx = true,},
489 	{.queue_id = 1, .proxy_id = 0, .is_tx = true,},
490 	{.queue_id = 2, .proxy_id = 0, .is_tx = true,},
491 	{.queue_id = 3, .proxy_id = 0, .is_tx = true,},
492 	{.queue_id = 5, .proxy_id = 2, .is_tx = false,},
493 	{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
494 	{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
495 	{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
496 	{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
497 	{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
498 	{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
499 };
500 
501 static const struct ti_msgmgr_desc k2g_desc = {
502 	.queue_count = 64,
503 	.max_message_size = 64,
504 	.max_messages = 128,
505 	.q_slices = 1,
506 	.q_proxies = 1,
507 	.data_first_reg = 16,
508 	.data_last_reg = 31,
509 	.tx_polled = false,
510 	.valid_queues = k2g_valid_queues,
511 	.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
512 };
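
/*
 * Consistency note on the K2G numbers above: data registers 16..31 of a
 * proxy span 16 x 4 = 64 bytes, matching max_message_size, and every
 * queue_id in k2g_valid_queues is below queue_count (64).
 */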
513 
514 static const struct of_device_id ti_msgmgr_of_match[] = {
515 	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
516 	{ /* Sentinel */ }
517 };
518 MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
519 
520 static int ti_msgmgr_probe(struct platform_device *pdev)
521 {
522 	struct device *dev = &pdev->dev;
523 	const struct of_device_id *of_id;
524 	struct device_node *np;
525 	struct resource *res;
526 	const struct ti_msgmgr_desc *desc;
527 	struct ti_msgmgr_inst *inst;
528 	struct ti_queue_inst *qinst;
529 	struct mbox_controller *mbox;
530 	struct mbox_chan *chans;
531 	int queue_count;
532 	int i;
533 	int ret = -EINVAL;
534 	const struct ti_msgmgr_valid_queue_desc *queue_desc;
535 
536 	if (!dev->of_node) {
537 		dev_err(dev, "no OF information\n");
538 		return -EINVAL;
539 	}
540 	np = dev->of_node;
541 
542 	of_id = of_match_device(ti_msgmgr_of_match, dev);
543 	if (!of_id) {
544 		dev_err(dev, "OF data missing\n");
545 		return -EINVAL;
546 	}
547 	desc = of_id->data;
548 
549 	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
550 	if (!inst)
551 		return -ENOMEM;
552 
553 	inst->dev = dev;
554 	inst->desc = desc;
555 
556 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
557 					   "queue_proxy_region");
558 	inst->queue_proxy_region = devm_ioremap_resource(dev, res);
559 	if (IS_ERR(inst->queue_proxy_region))
560 		return PTR_ERR(inst->queue_proxy_region);
561 
562 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
563 					   "queue_state_debug_region");
564 	inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
565 	if (IS_ERR(inst->queue_state_debug_region))
566 		return PTR_ERR(inst->queue_state_debug_region);
567 
568 	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
569 		inst->queue_proxy_region, inst->queue_state_debug_region);
570 
571 	queue_count = desc->num_valid_queues;
572 	if (!queue_count || queue_count > desc->queue_count) {
573 		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
574 			 queue_count, desc->queue_count);
575 		return -ERANGE;
576 	}
577 	inst->num_valid_queues = queue_count;
578 
579 	qinst = devm_kzalloc(dev, sizeof(*qinst) * queue_count, GFP_KERNEL);
580 	if (!qinst)
581 		return -ENOMEM;
582 	inst->qinsts = qinst;
583 
584 	chans = devm_kzalloc(dev, sizeof(*chans) * queue_count, GFP_KERNEL);
585 	if (!chans)
586 		return -ENOMEM;
587 	inst->chans = chans;
588 
589 	for (i = 0, queue_desc = desc->valid_queues;
590 	     i < queue_count; i++, qinst++, chans++, queue_desc++) {
591 		ret = ti_msgmgr_queue_setup(i, dev, np, inst,
592 					    desc, queue_desc, qinst, chans);
593 		if (ret)
594 			return ret;
595 	}
596 
597 	mbox = &inst->mbox;
598 	mbox->dev = dev;
599 	mbox->ops = &ti_msgmgr_chan_ops;
600 	mbox->chans = inst->chans;
601 	mbox->num_chans = inst->num_valid_queues;
602 	mbox->txdone_irq = false;
603 	mbox->txdone_poll = desc->tx_polled;
604 	if (desc->tx_polled)
605 		mbox->txpoll_period = desc->tx_poll_timeout_ms;
606 	mbox->of_xlate = ti_msgmgr_of_xlate;
607 
608 	platform_set_drvdata(pdev, inst);
609 	ret = mbox_controller_register(mbox);
610 	if (ret)
611 		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
612 
613 	return ret;
614 }
615 
616 static int ti_msgmgr_remove(struct platform_device *pdev)
617 {
618 	struct ti_msgmgr_inst *inst;
619 
620 	inst = platform_get_drvdata(pdev);
621 	mbox_controller_unregister(&inst->mbox);
622 
623 	return 0;
624 }
625 
626 static struct platform_driver ti_msgmgr_driver = {
627 	.probe = ti_msgmgr_probe,
628 	.remove = ti_msgmgr_remove,
629 	.driver = {
630 		   .name = "ti-msgmgr",
631 		   .of_match_table = of_match_ptr(ti_msgmgr_of_match),
632 	},
633 };
634 module_platform_driver(ti_msgmgr_driver);
635 
636 MODULE_LICENSE("GPL v2");
637 MODULE_DESCRIPTION("TI message manager driver");
638 MODULE_AUTHOR("Nishanth Menon");
639 MODULE_ALIAS("platform:ti-msgmgr");
640