/* xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/mbox.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf) */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 
12 #include "rvu_reg.h"
13 #include "cn20k/reg.h"
14 #include "cn20k/api.h"
15 #include "mbox.h"
16 #include "rvu_trace.h"
17 #include "rvu.h"
18 
19 static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
20 
__otx2_mbox_reset(struct otx2_mbox * mbox,int devid)21 void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
22 {
23 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
24 	struct mbox_hdr *tx_hdr, *rx_hdr;
25 	void *hw_mbase = mdev->hwbase;
26 
27 	tx_hdr = hw_mbase + mbox->tx_start;
28 	rx_hdr = hw_mbase + mbox->rx_start;
29 
30 	mdev->msg_size = 0;
31 	mdev->rsp_size = 0;
32 	tx_hdr->num_msgs = 0;
33 	tx_hdr->msg_size = 0;
34 	tx_hdr->sig = 0;
35 	rx_hdr->num_msgs = 0;
36 	rx_hdr->msg_size = 0;
37 	rx_hdr->sig = 0;
38 }
39 EXPORT_SYMBOL(__otx2_mbox_reset);
40 
otx2_mbox_reset(struct otx2_mbox * mbox,int devid)41 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
42 {
43 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
44 
45 	spin_lock(&mdev->mbox_lock);
46 	__otx2_mbox_reset(mbox, devid);
47 	spin_unlock(&mdev->mbox_lock);
48 }
49 EXPORT_SYMBOL(otx2_mbox_reset);
50 
otx2_mbox_destroy(struct otx2_mbox * mbox)51 void otx2_mbox_destroy(struct otx2_mbox *mbox)
52 {
53 	mbox->reg_base = NULL;
54 	mbox->hwbase = NULL;
55 
56 	kfree(mbox->dev);
57 	mbox->dev = NULL;
58 }
59 EXPORT_SYMBOL(otx2_mbox_destroy);
60 
cn20k_mbox_setup(struct otx2_mbox * mbox,struct pci_dev * pdev,void * reg_base,int direction,int ndevs)61 int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
62 		     void *reg_base, int direction, int ndevs)
63 {
64 	switch (direction) {
65 	case MBOX_DIR_AFPF:
66 	case MBOX_DIR_PFVF:
67 		mbox->tx_start = MBOX_DOWN_TX_START;
68 		mbox->rx_start = MBOX_DOWN_RX_START;
69 		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
70 		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
71 		break;
72 	case MBOX_DIR_PFAF:
73 	case MBOX_DIR_VFPF:
74 		mbox->tx_start = MBOX_DOWN_RX_START;
75 		mbox->rx_start = MBOX_DOWN_TX_START;
76 		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
77 		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
78 		break;
79 	case MBOX_DIR_AFPF_UP:
80 	case MBOX_DIR_PFVF_UP:
81 		mbox->tx_start = MBOX_UP_TX_START;
82 		mbox->rx_start = MBOX_UP_RX_START;
83 		mbox->tx_size  = MBOX_UP_TX_SIZE;
84 		mbox->rx_size  = MBOX_UP_RX_SIZE;
85 		break;
86 	case MBOX_DIR_PFAF_UP:
87 	case MBOX_DIR_VFPF_UP:
88 		mbox->tx_start = MBOX_UP_RX_START;
89 		mbox->rx_start = MBOX_UP_TX_START;
90 		mbox->tx_size  = MBOX_UP_RX_SIZE;
91 		mbox->rx_size  = MBOX_UP_TX_SIZE;
92 		break;
93 	default:
94 		return -ENODEV;
95 	}
96 
97 	switch (direction) {
98 	case MBOX_DIR_AFPF:
99 		mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1);
100 		mbox->tr_shift = 4;
101 		break;
102 	case MBOX_DIR_AFPF_UP:
103 		mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0);
104 		mbox->tr_shift = 4;
105 		break;
106 	case MBOX_DIR_PFAF:
107 		mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0);
108 		mbox->tr_shift = 0;
109 		break;
110 	case MBOX_DIR_PFAF_UP:
111 		mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1);
112 		mbox->tr_shift = 0;
113 		break;
114 	case MBOX_DIR_PFVF:
115 		mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1);
116 		mbox->tr_shift = 4;
117 		break;
118 	case MBOX_DIR_PFVF_UP:
119 		mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0);
120 		mbox->tr_shift = 4;
121 		break;
122 	case MBOX_DIR_VFPF:
123 		mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0);
124 		mbox->tr_shift = 0;
125 		break;
126 	case MBOX_DIR_VFPF_UP:
127 		mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1);
128 		mbox->tr_shift = 0;
129 		break;
130 	default:
131 		return -ENODEV;
132 	}
133 	mbox->reg_base = reg_base;
134 	mbox->pdev = pdev;
135 
136 	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
137 	if (!mbox->dev) {
138 		otx2_mbox_destroy(mbox);
139 		return -ENOMEM;
140 	}
141 	mbox->ndevs = ndevs;
142 
143 	return 0;
144 }
145 
otx2_mbox_setup(struct otx2_mbox * mbox,struct pci_dev * pdev,void * reg_base,int direction,int ndevs)146 static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
147 			   void *reg_base, int direction, int ndevs)
148 {
149 	if (is_cn20k(pdev))
150 		return cn20k_mbox_setup(mbox, pdev, reg_base,
151 							direction, ndevs);
152 
153 	switch (direction) {
154 	case MBOX_DIR_AFPF:
155 	case MBOX_DIR_PFVF:
156 		mbox->tx_start = MBOX_DOWN_TX_START;
157 		mbox->rx_start = MBOX_DOWN_RX_START;
158 		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
159 		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
160 		break;
161 	case MBOX_DIR_PFAF:
162 	case MBOX_DIR_VFPF:
163 		mbox->tx_start = MBOX_DOWN_RX_START;
164 		mbox->rx_start = MBOX_DOWN_TX_START;
165 		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
166 		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
167 		break;
168 	case MBOX_DIR_AFPF_UP:
169 	case MBOX_DIR_PFVF_UP:
170 		mbox->tx_start = MBOX_UP_TX_START;
171 		mbox->rx_start = MBOX_UP_RX_START;
172 		mbox->tx_size  = MBOX_UP_TX_SIZE;
173 		mbox->rx_size  = MBOX_UP_RX_SIZE;
174 		break;
175 	case MBOX_DIR_PFAF_UP:
176 	case MBOX_DIR_VFPF_UP:
177 		mbox->tx_start = MBOX_UP_RX_START;
178 		mbox->rx_start = MBOX_UP_TX_START;
179 		mbox->tx_size  = MBOX_UP_RX_SIZE;
180 		mbox->rx_size  = MBOX_UP_TX_SIZE;
181 		break;
182 	default:
183 		return -ENODEV;
184 	}
185 
186 	switch (direction) {
187 	case MBOX_DIR_AFPF:
188 	case MBOX_DIR_AFPF_UP:
189 		mbox->trigger = RVU_AF_AFPF_MBOX0;
190 		mbox->tr_shift = 4;
191 		break;
192 	case MBOX_DIR_PFAF:
193 	case MBOX_DIR_PFAF_UP:
194 		mbox->trigger = RVU_PF_PFAF_MBOX1;
195 		mbox->tr_shift = 0;
196 		break;
197 	case MBOX_DIR_PFVF:
198 	case MBOX_DIR_PFVF_UP:
199 		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
200 		mbox->tr_shift = 12;
201 		break;
202 	case MBOX_DIR_VFPF:
203 	case MBOX_DIR_VFPF_UP:
204 		mbox->trigger = RVU_VF_VFPF_MBOX1;
205 		mbox->tr_shift = 0;
206 		break;
207 	default:
208 		return -ENODEV;
209 	}
210 
211 	mbox->reg_base = reg_base;
212 	mbox->pdev = pdev;
213 
214 	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
215 	if (!mbox->dev) {
216 		otx2_mbox_destroy(mbox);
217 		return -ENOMEM;
218 	}
219 	mbox->ndevs = ndevs;
220 
221 	return 0;
222 }
223 
otx2_mbox_init(struct otx2_mbox * mbox,void * hwbase,struct pci_dev * pdev,void * reg_base,int direction,int ndevs)224 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
225 		   void *reg_base, int direction, int ndevs)
226 {
227 	struct otx2_mbox_dev *mdev;
228 	int devid, err;
229 
230 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
231 	if (err)
232 		return err;
233 
234 	mbox->hwbase = hwbase;
235 
236 	for (devid = 0; devid < ndevs; devid++) {
237 		mdev = &mbox->dev[devid];
238 		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
239 		mdev->hwbase = mdev->mbase;
240 		spin_lock_init(&mdev->mbox_lock);
241 		/* Init header to reset value */
242 		otx2_mbox_reset(mbox, devid);
243 	}
244 
245 	return 0;
246 }
247 EXPORT_SYMBOL(otx2_mbox_init);
248 
249 /* Initialize mailbox with the set of mailbox region addresses
250  * in the array hwbase.
251  */
otx2_mbox_regions_init(struct otx2_mbox * mbox,void ** hwbase,struct pci_dev * pdev,void * reg_base,int direction,int ndevs,unsigned long * pf_bmap)252 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
253 			   struct pci_dev *pdev, void *reg_base,
254 			   int direction, int ndevs, unsigned long *pf_bmap)
255 {
256 	struct otx2_mbox_dev *mdev;
257 	int devid, err;
258 
259 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
260 	if (err)
261 		return err;
262 
263 	mbox->hwbase = hwbase[0];
264 
265 	for (devid = 0; devid < ndevs; devid++) {
266 		if (!test_bit(devid, pf_bmap))
267 			continue;
268 
269 		mdev = &mbox->dev[devid];
270 		mdev->mbase = hwbase[devid];
271 		mdev->hwbase = hwbase[devid];
272 		spin_lock_init(&mdev->mbox_lock);
273 		/* Init header to reset value */
274 		otx2_mbox_reset(mbox, devid);
275 	}
276 
277 	return 0;
278 }
279 EXPORT_SYMBOL(otx2_mbox_regions_init);
280 
otx2_mbox_wait_for_rsp(struct otx2_mbox * mbox,int devid)281 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
282 {
283 	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
284 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
285 
286 	while (!time_after(jiffies, timeout)) {
287 		if (mdev->num_msgs == mdev->msgs_acked)
288 			return 0;
289 		usleep_range(800, 1000);
290 	}
291 	trace_otx2_msg_wait_rsp(mbox->pdev);
292 	return -EIO;
293 }
294 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
295 
otx2_mbox_busy_poll_for_rsp(struct otx2_mbox * mbox,int devid)296 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
297 {
298 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
299 	unsigned long timeout = jiffies + 1 * HZ;
300 
301 	while (!time_after(jiffies, timeout)) {
302 		if (mdev->num_msgs == mdev->msgs_acked)
303 			return 0;
304 		cpu_relax();
305 	}
306 	return -EIO;
307 }
308 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
309 
/* Flush queued TX messages to the shared hardware mailbox and ring the
 * peer's doorbell by OR-ing @data (MBOX_DOWN_MSG or MBOX_UP_MSG) into the
 * per-device trigger register. The write ordering here is deliberate:
 * messages first, barrier, headers, then the doorbell last.
 */
static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;
	struct mbox_msghdr *msg;
	u64 intr_val;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	/* Publish size/count only when sig is clear.
	 * NOTE(review): sig appears to guard against clobbering a header
	 * the peer has not consumed yet - confirm against the peer side.
	 */
	if (!tx_hdr->sig) {
		tx_hdr->msg_size = mdev->msg_size;
		tx_hdr->num_msgs = mdev->num_msgs;
	}

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	rx_hdr->num_msgs = 0;

	/* First message in the TX region; only used for tracing below */
	msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
			    msg->id, msg->pcifunc);

	spin_unlock(&mdev->mbox_lock);

	/* Check if interrupt pending */
	intr_val = readq((void __iomem *)mbox->reg_base +
		     (mbox->trigger | (devid << mbox->tr_shift)));

	intr_val |= data;
	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(intr_val, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
368 
/* Send the queued messages with the MBOX_DOWN_MSG doorbell value. */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send);
374 
/* Send the queued messages with the MBOX_UP_MSG doorbell value. */
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send_up);
380 
otx2_mbox_wait_for_zero(struct otx2_mbox * mbox,int devid)381 bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
382 {
383 	u64 data;
384 
385 	data = readq((void __iomem *)mbox->reg_base +
386 		     (mbox->trigger | (devid << mbox->tr_shift)));
387 
388 	/* If data is non-zero wait for ~1ms and return to caller
389 	 * whether data has changed to zero or not after the wait.
390 	 */
391 	if (!data)
392 		return true;
393 
394 	usleep_range(950, 1000);
395 
396 	data = readq((void __iomem *)mbox->reg_base +
397 		     (mbox->trigger | (devid << mbox->tr_shift)));
398 
399 	return data == 0;
400 }
401 EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
402 
otx2_mbox_alloc_msg_rsp(struct otx2_mbox * mbox,int devid,int size,int size_rsp)403 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
404 					    int size, int size_rsp)
405 {
406 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
407 	struct mbox_msghdr *msghdr = NULL;
408 	struct mbox_hdr *mboxhdr = NULL;
409 
410 	spin_lock(&mdev->mbox_lock);
411 	size = ALIGN(size, MBOX_MSG_ALIGN);
412 	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
413 	/* Check if there is space in mailbox */
414 	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
415 		goto exit;
416 	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
417 		goto exit;
418 
419 	if (mdev->msg_size == 0)
420 		mdev->num_msgs = 0;
421 	mdev->num_msgs++;
422 
423 	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
424 
425 	/* Clear the whole msg region */
426 	memset(msghdr, 0, size);
427 	/* Init message header with reset values */
428 	msghdr->ver = OTX2_MBOX_VERSION;
429 	mdev->msg_size += size;
430 	mdev->rsp_size += size_rsp;
431 	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
432 
433 	mboxhdr = mdev->mbase + mbox->tx_start;
434 	/* Clear the msg header region */
435 	memset(mboxhdr, 0, msgs_offset);
436 
437 exit:
438 	spin_unlock(&mdev->mbox_lock);
439 
440 	return msghdr;
441 }
442 EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
443 
otx2_mbox_get_rsp(struct otx2_mbox * mbox,int devid,struct mbox_msghdr * msg)444 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
445 				      struct mbox_msghdr *msg)
446 {
447 	unsigned long imsg = mbox->tx_start + msgs_offset;
448 	unsigned long irsp = mbox->rx_start + msgs_offset;
449 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
450 	u16 msgs;
451 
452 	spin_lock(&mdev->mbox_lock);
453 
454 	if (mdev->num_msgs != mdev->msgs_acked)
455 		goto error;
456 
457 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
458 		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
459 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
460 
461 		if (msg == pmsg) {
462 			if (pmsg->id != prsp->id)
463 				goto error;
464 			spin_unlock(&mdev->mbox_lock);
465 			return prsp;
466 		}
467 
468 		imsg = mbox->tx_start + pmsg->next_msgoff;
469 		irsp = mbox->rx_start + prsp->next_msgoff;
470 	}
471 
472 error:
473 	spin_unlock(&mdev->mbox_lock);
474 	return ERR_PTR(-ENODEV);
475 }
476 EXPORT_SYMBOL(otx2_mbox_get_rsp);
477 
otx2_mbox_check_rsp_msgs(struct otx2_mbox * mbox,int devid)478 int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
479 {
480 	unsigned long ireq = mbox->tx_start + msgs_offset;
481 	unsigned long irsp = mbox->rx_start + msgs_offset;
482 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
483 	int rc = -ENODEV;
484 	u16 msgs;
485 
486 	spin_lock(&mdev->mbox_lock);
487 
488 	if (mdev->num_msgs != mdev->msgs_acked)
489 		goto exit;
490 
491 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
492 		struct mbox_msghdr *preq = mdev->mbase + ireq;
493 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
494 
495 		if (preq->id != prsp->id) {
496 			trace_otx2_msg_check(mbox->pdev, preq->id,
497 					     prsp->id, prsp->rc);
498 			goto exit;
499 		}
500 		if (prsp->rc) {
501 			rc = prsp->rc;
502 			trace_otx2_msg_check(mbox->pdev, preq->id,
503 					     prsp->id, prsp->rc);
504 			goto exit;
505 		}
506 
507 		ireq = mbox->tx_start + preq->next_msgoff;
508 		irsp = mbox->rx_start + prsp->next_msgoff;
509 	}
510 	rc = 0;
511 exit:
512 	spin_unlock(&mdev->mbox_lock);
513 	return rc;
514 }
515 EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
516 
517 int
otx2_reply_invalid_msg(struct otx2_mbox * mbox,int devid,u16 pcifunc,u16 id)518 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
519 {
520 	struct msg_rsp *rsp;
521 
522 	rsp = (struct msg_rsp *)
523 	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
524 	if (!rsp)
525 		return -ENOMEM;
526 	rsp->hdr.id = id;
527 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
528 	rsp->hdr.rc = MBOX_MSG_INVALID;
529 	rsp->hdr.pcifunc = pcifunc;
530 	return 0;
531 }
532 EXPORT_SYMBOL(otx2_reply_invalid_msg);
533 
otx2_mbox_nonempty(struct otx2_mbox * mbox,int devid)534 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
535 {
536 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
537 	bool ret;
538 
539 	spin_lock(&mdev->mbox_lock);
540 	ret = mdev->num_msgs != 0;
541 	spin_unlock(&mdev->mbox_lock);
542 
543 	return ret;
544 }
545 EXPORT_SYMBOL(otx2_mbox_nonempty);
546 
/* Map a mailbox message id to its symbolic name for logging/tracing.
 * The case labels are generated from the MBOX_MESSAGES,
 * MBOX_UP_CGX_MESSAGES and MBOX_UP_CPT_MESSAGES X-macro tables
 * (declared in the project's mbox header).
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M

#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_UP_CGX_MESSAGES
#undef M

#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_UP_CPT_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);
566 
567 MODULE_AUTHOR("Marvell.");
568 MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
569 MODULE_LICENSE("GPL v2");
570