xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/mbox.c (revision e47a324d6f07c9ef252cfce1f14cfa5110cbed99)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 
12 #include "rvu_reg.h"
13 #include "mbox.h"
14 #include "rvu_trace.h"
15 
/* Offset of the first message within a mbox region: each region starts
 * with a 'struct mbox_hdr', rounded up to MBOX_MSG_ALIGN.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
17 
18 void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
19 {
20 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
21 	struct mbox_hdr *tx_hdr, *rx_hdr;
22 	void *hw_mbase = mdev->hwbase;
23 
24 	tx_hdr = hw_mbase + mbox->tx_start;
25 	rx_hdr = hw_mbase + mbox->rx_start;
26 
27 	mdev->msg_size = 0;
28 	mdev->rsp_size = 0;
29 	tx_hdr->num_msgs = 0;
30 	tx_hdr->msg_size = 0;
31 	rx_hdr->num_msgs = 0;
32 	rx_hdr->msg_size = 0;
33 }
34 EXPORT_SYMBOL(__otx2_mbox_reset);
35 
36 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
37 {
38 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
39 
40 	spin_lock(&mdev->mbox_lock);
41 	__otx2_mbox_reset(mbox, devid);
42 	spin_unlock(&mdev->mbox_lock);
43 }
44 EXPORT_SYMBOL(otx2_mbox_reset);
45 
46 void otx2_mbox_destroy(struct otx2_mbox *mbox)
47 {
48 	mbox->reg_base = NULL;
49 	mbox->hwbase = NULL;
50 
51 	kfree(mbox->dev);
52 	mbox->dev = NULL;
53 }
54 EXPORT_SYMBOL(otx2_mbox_destroy);
55 
/* Common init for otx2_mbox_init()/otx2_mbox_regions_init(): derive the
 * Tx/Rx window layout and the doorbell (trigger) register from @direction,
 * then allocate the per-device state array.
 *
 * Note the mirrored pairs: e.g. the AF->PF direction's Tx window is the
 * PF->AF direction's Rx window, so the two switch arms swap START/SIZE
 * constants accordingly.
 *
 * Returns 0 on success, -ENODEV for an unknown @direction, or -ENOMEM if
 * the per-device array cannot be allocated.
 */
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
			   void *reg_base, int direction, int ndevs)
{
	/* Pick Tx/Rx window offsets and sizes within a mbox region. */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		/* Mirror of the AFPF/PFVF layout: Tx and Rx swapped. */
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size  = MBOX_UP_TX_SIZE;
		mbox->rx_size  = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		/* Mirror of the AFPF_UP/PFVF_UP layout. */
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size  = MBOX_UP_RX_SIZE;
		mbox->rx_size  = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	/* Pick the doorbell register and the per-device stride shift used
	 * to address it (devid << tr_shift is OR-ed into the offset).
	 */
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		/* Unreachable: the first switch already rejected unknown
		 * directions, but keep the guard for safety.
		 */
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;

	return 0;
}
129 
130 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
131 		   void *reg_base, int direction, int ndevs)
132 {
133 	struct otx2_mbox_dev *mdev;
134 	int devid, err;
135 
136 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
137 	if (err)
138 		return err;
139 
140 	mbox->hwbase = hwbase;
141 
142 	for (devid = 0; devid < ndevs; devid++) {
143 		mdev = &mbox->dev[devid];
144 		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
145 		mdev->hwbase = mdev->mbase;
146 		spin_lock_init(&mdev->mbox_lock);
147 		/* Init header to reset value */
148 		otx2_mbox_reset(mbox, devid);
149 	}
150 
151 	return 0;
152 }
153 EXPORT_SYMBOL(otx2_mbox_init);
154 
155 /* Initialize mailbox with the set of mailbox region addresses
156  * in the array hwbase.
157  */
158 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
159 			   struct pci_dev *pdev, void *reg_base,
160 			   int direction, int ndevs, unsigned long *pf_bmap)
161 {
162 	struct otx2_mbox_dev *mdev;
163 	int devid, err;
164 
165 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
166 	if (err)
167 		return err;
168 
169 	mbox->hwbase = hwbase[0];
170 
171 	for (devid = 0; devid < ndevs; devid++) {
172 		if (!test_bit(devid, pf_bmap))
173 			continue;
174 
175 		mdev = &mbox->dev[devid];
176 		mdev->mbase = hwbase[devid];
177 		mdev->hwbase = hwbase[devid];
178 		spin_lock_init(&mdev->mbox_lock);
179 		/* Init header to reset value */
180 		otx2_mbox_reset(mbox, devid);
181 	}
182 
183 	return 0;
184 }
185 EXPORT_SYMBOL(otx2_mbox_regions_init);
186 
187 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
188 {
189 	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
190 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
191 
192 	while (!time_after(jiffies, timeout)) {
193 		if (mdev->num_msgs == mdev->msgs_acked)
194 			return 0;
195 		usleep_range(800, 1000);
196 	}
197 	trace_otx2_msg_wait_rsp(mbox->pdev);
198 	return -EIO;
199 }
200 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
201 
202 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
203 {
204 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
205 	unsigned long timeout = jiffies + 1 * HZ;
206 
207 	while (!time_after(jiffies, timeout)) {
208 		if (mdev->num_msgs == mdev->msgs_acked)
209 			return 0;
210 		cpu_relax();
211 	}
212 	return -EIO;
213 }
214 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
215 
/* Publish the messages queued in @devid's Tx window and ring the peer's
 * doorbell.
 *
 * If a bounce buffer is in use (mdev->mbase != mdev->hwbase) the queued
 * payload is first copied into the hardware mailbox memory.  The Tx
 * header's num_msgs is written only after all payload writes, with
 * smp_wmb() ordering the two, so the peer never sees a non-zero message
 * count paired with stale contents.  Finally @data (MBOX_DOWN_MSG or
 * MBOX_UP_MSG) is OR-ed into the trigger register to raise the peer's
 * interrupt.
 */
static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;
	struct mbox_msghdr *msg;
	u64 intr_val;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	/* First message in the Tx window, used only for the trace event. */
	msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
			    msg->id, msg->pcifunc);

	spin_unlock(&mdev->mbox_lock);

	/* Check if interrupt pending */
	intr_val = readq((void __iomem *)mbox->reg_base +
		     (mbox->trigger | (devid << mbox->tr_shift)));

	intr_val |= data;
	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(intr_val, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
272 
/* Send queued down-direction (request) messages to @devid's peer. */
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send);
278 
/* Send queued up-direction (notification) messages to @devid's peer. */
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send_up);
284 
285 bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
286 {
287 	u64 data;
288 
289 	data = readq((void __iomem *)mbox->reg_base +
290 		     (mbox->trigger | (devid << mbox->tr_shift)));
291 
292 	/* If data is non-zero wait for ~1ms and return to caller
293 	 * whether data has changed to zero or not after the wait.
294 	 */
295 	if (!data)
296 		return true;
297 
298 	usleep_range(950, 1000);
299 
300 	data = readq((void __iomem *)mbox->reg_base +
301 		     (mbox->trigger | (devid << mbox->tr_shift)));
302 
303 	return data == 0;
304 }
305 EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
306 
/* Reserve space for a new request of @size bytes in @devid's Tx window,
 * and account @size_rsp bytes for the expected response in the Rx window.
 * Both sizes are rounded up to MBOX_MSG_ALIGN.
 *
 * Returns a zeroed message header (ver preset to OTX2_MBOX_VERSION,
 * next_msgoff pointing past this message) for the caller to fill in, or
 * NULL if either window lacks space.
 */
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	/* First message after a send/reset restarts the count. */
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	/* New message begins where the previous accounting left off. */
	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
341 
/* Locate the response corresponding to request @msg in @devid's Rx window.
 *
 * Requests and responses are laid out pairwise: walking the Tx and Rx
 * message chains in lockstep, the Nth response answers the Nth request.
 * Returns ERR_PTR(-ENODEV) if not all sent messages have been acked yet,
 * if @msg is not found, or if the paired response carries a different
 * message id than the request.
 */
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	/* Responses are only valid once everything sent has been acked. */
	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			/* Paired response must answer the same message id. */
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		/* Advance both chains in lockstep. */
		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
375 
/* Walk the request/response pairs for @devid and report the first error.
 *
 * Returns -ENODEV if responses are still outstanding or a response id
 * does not match its request, the response's own rc for the first failed
 * request, or 0 if every request succeeded.
 */
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	/* Responses are only valid once everything sent has been acked. */
	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		/* Nth response must answer the Nth request. */
		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		/* Advance both chains in lockstep. */
		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
414 
415 int
416 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
417 {
418 	struct msg_rsp *rsp;
419 
420 	rsp = (struct msg_rsp *)
421 	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
422 	if (!rsp)
423 		return -ENOMEM;
424 	rsp->hdr.id = id;
425 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
426 	rsp->hdr.rc = MBOX_MSG_INVALID;
427 	rsp->hdr.pcifunc = pcifunc;
428 	return 0;
429 }
430 EXPORT_SYMBOL(otx2_reply_invalid_msg);
431 
432 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
433 {
434 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
435 	bool ret;
436 
437 	spin_lock(&mdev->mbox_lock);
438 	ret = mdev->num_msgs != 0;
439 	spin_unlock(&mdev->mbox_lock);
440 
441 	return ret;
442 }
443 EXPORT_SYMBOL(otx2_mbox_nonempty);
444 
/* Map a mbox message id to its name for logging/tracing.
 *
 * The M() X-macro expands each entry of the MBOX_MESSAGES /
 * MBOX_UP_CGX_MESSAGES / MBOX_UP_CPT_MESSAGES tables into a
 * "case _id: return "_name";" statement.  Unknown ids yield
 * "INVALID ID".
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M

#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_UP_CGX_MESSAGES
#undef M

#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_UP_CPT_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);
464 
/* Module metadata. */
MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
468