xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/mbox.c (revision ec8a42e7343234802b9054874fe01810880289ce)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_reg.h"
16 #include "mbox.h"
17 #include "rvu_trace.h"
18 
/* Offset of the first message within a mailbox region: each region begins
 * with a struct mbox_hdr, padded up to MBOX_MSG_ALIGN.
 */
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
20 
21 void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
22 {
23 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
24 	struct mbox_hdr *tx_hdr, *rx_hdr;
25 	void *hw_mbase = mdev->hwbase;
26 
27 	tx_hdr = hw_mbase + mbox->tx_start;
28 	rx_hdr = hw_mbase + mbox->rx_start;
29 
30 	mdev->msg_size = 0;
31 	mdev->rsp_size = 0;
32 	tx_hdr->num_msgs = 0;
33 	tx_hdr->msg_size = 0;
34 	rx_hdr->num_msgs = 0;
35 	rx_hdr->msg_size = 0;
36 }
37 EXPORT_SYMBOL(__otx2_mbox_reset);
38 
39 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
40 {
41 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
42 
43 	spin_lock(&mdev->mbox_lock);
44 	__otx2_mbox_reset(mbox, devid);
45 	spin_unlock(&mdev->mbox_lock);
46 }
47 EXPORT_SYMBOL(otx2_mbox_reset);
48 
49 void otx2_mbox_destroy(struct otx2_mbox *mbox)
50 {
51 	mbox->reg_base = NULL;
52 	mbox->hwbase = NULL;
53 
54 	kfree(mbox->dev);
55 	mbox->dev = NULL;
56 }
57 EXPORT_SYMBOL(otx2_mbox_destroy);
58 
59 static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
60 			   void *reg_base, int direction, int ndevs)
61 {
62 	switch (direction) {
63 	case MBOX_DIR_AFPF:
64 	case MBOX_DIR_PFVF:
65 		mbox->tx_start = MBOX_DOWN_TX_START;
66 		mbox->rx_start = MBOX_DOWN_RX_START;
67 		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
68 		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
69 		break;
70 	case MBOX_DIR_PFAF:
71 	case MBOX_DIR_VFPF:
72 		mbox->tx_start = MBOX_DOWN_RX_START;
73 		mbox->rx_start = MBOX_DOWN_TX_START;
74 		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
75 		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
76 		break;
77 	case MBOX_DIR_AFPF_UP:
78 	case MBOX_DIR_PFVF_UP:
79 		mbox->tx_start = MBOX_UP_TX_START;
80 		mbox->rx_start = MBOX_UP_RX_START;
81 		mbox->tx_size  = MBOX_UP_TX_SIZE;
82 		mbox->rx_size  = MBOX_UP_RX_SIZE;
83 		break;
84 	case MBOX_DIR_PFAF_UP:
85 	case MBOX_DIR_VFPF_UP:
86 		mbox->tx_start = MBOX_UP_RX_START;
87 		mbox->rx_start = MBOX_UP_TX_START;
88 		mbox->tx_size  = MBOX_UP_RX_SIZE;
89 		mbox->rx_size  = MBOX_UP_TX_SIZE;
90 		break;
91 	default:
92 		return -ENODEV;
93 	}
94 
95 	switch (direction) {
96 	case MBOX_DIR_AFPF:
97 	case MBOX_DIR_AFPF_UP:
98 		mbox->trigger = RVU_AF_AFPF_MBOX0;
99 		mbox->tr_shift = 4;
100 		break;
101 	case MBOX_DIR_PFAF:
102 	case MBOX_DIR_PFAF_UP:
103 		mbox->trigger = RVU_PF_PFAF_MBOX1;
104 		mbox->tr_shift = 0;
105 		break;
106 	case MBOX_DIR_PFVF:
107 	case MBOX_DIR_PFVF_UP:
108 		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
109 		mbox->tr_shift = 12;
110 		break;
111 	case MBOX_DIR_VFPF:
112 	case MBOX_DIR_VFPF_UP:
113 		mbox->trigger = RVU_VF_VFPF_MBOX1;
114 		mbox->tr_shift = 0;
115 		break;
116 	default:
117 		return -ENODEV;
118 	}
119 
120 	mbox->reg_base = reg_base;
121 	mbox->pdev = pdev;
122 
123 	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
124 	if (!mbox->dev) {
125 		otx2_mbox_destroy(mbox);
126 		return -ENOMEM;
127 	}
128 	mbox->ndevs = ndevs;
129 
130 	return 0;
131 }
132 
133 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
134 		   void *reg_base, int direction, int ndevs)
135 {
136 	struct otx2_mbox_dev *mdev;
137 	int devid, err;
138 
139 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
140 	if (err)
141 		return err;
142 
143 	mbox->hwbase = hwbase;
144 
145 	for (devid = 0; devid < ndevs; devid++) {
146 		mdev = &mbox->dev[devid];
147 		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
148 		mdev->hwbase = mdev->mbase;
149 		spin_lock_init(&mdev->mbox_lock);
150 		/* Init header to reset value */
151 		otx2_mbox_reset(mbox, devid);
152 	}
153 
154 	return 0;
155 }
156 EXPORT_SYMBOL(otx2_mbox_init);
157 
158 /* Initialize mailbox with the set of mailbox region addresses
159  * in the array hwbase.
160  */
161 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
162 			   struct pci_dev *pdev, void *reg_base,
163 			   int direction, int ndevs)
164 {
165 	struct otx2_mbox_dev *mdev;
166 	int devid, err;
167 
168 	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
169 	if (err)
170 		return err;
171 
172 	mbox->hwbase = hwbase[0];
173 
174 	for (devid = 0; devid < ndevs; devid++) {
175 		mdev = &mbox->dev[devid];
176 		mdev->mbase = hwbase[devid];
177 		mdev->hwbase = hwbase[devid];
178 		spin_lock_init(&mdev->mbox_lock);
179 		/* Init header to reset value */
180 		otx2_mbox_reset(mbox, devid);
181 	}
182 
183 	return 0;
184 }
185 EXPORT_SYMBOL(otx2_mbox_regions_init);
186 
187 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
188 {
189 	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
190 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
191 	struct device *sender = &mbox->pdev->dev;
192 
193 	while (!time_after(jiffies, timeout)) {
194 		if (mdev->num_msgs == mdev->msgs_acked)
195 			return 0;
196 		usleep_range(800, 1000);
197 	}
198 	dev_dbg(sender, "timed out while waiting for rsp\n");
199 	return -EIO;
200 }
201 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
202 
203 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
204 {
205 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
206 	unsigned long timeout = jiffies + 1 * HZ;
207 
208 	while (!time_after(jiffies, timeout)) {
209 		if (mdev->num_msgs == mdev->msgs_acked)
210 			return 0;
211 		cpu_relax();
212 	}
213 	return -EIO;
214 }
215 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
216 
217 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
218 {
219 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
220 	struct mbox_hdr *tx_hdr, *rx_hdr;
221 	void *hw_mbase = mdev->hwbase;
222 
223 	tx_hdr = hw_mbase + mbox->tx_start;
224 	rx_hdr = hw_mbase + mbox->rx_start;
225 
226 	/* If bounce buffer is implemented copy mbox messages from
227 	 * bounce buffer to hw mbox memory.
228 	 */
229 	if (mdev->mbase != hw_mbase)
230 		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
231 		       mdev->mbase + mbox->tx_start + msgs_offset,
232 		       mdev->msg_size);
233 
234 	spin_lock(&mdev->mbox_lock);
235 
236 	tx_hdr->msg_size = mdev->msg_size;
237 
238 	/* Reset header for next messages */
239 	mdev->msg_size = 0;
240 	mdev->rsp_size = 0;
241 	mdev->msgs_acked = 0;
242 
243 	/* Sync mbox data into memory */
244 	smp_wmb();
245 
246 	/* num_msgs != 0 signals to the peer that the buffer has a number of
247 	 * messages.  So this should be written after writing all the messages
248 	 * to the shared memory.
249 	 */
250 	tx_hdr->num_msgs = mdev->num_msgs;
251 	rx_hdr->num_msgs = 0;
252 
253 	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
254 
255 	spin_unlock(&mdev->mbox_lock);
256 
257 	/* The interrupt should be fired after num_msgs is written
258 	 * to the shared memory
259 	 */
260 	writeq(1, (void __iomem *)mbox->reg_base +
261 	       (mbox->trigger | (devid << mbox->tr_shift)));
262 }
263 EXPORT_SYMBOL(otx2_mbox_msg_send);
264 
265 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
266 					    int size, int size_rsp)
267 {
268 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
269 	struct mbox_msghdr *msghdr = NULL;
270 
271 	spin_lock(&mdev->mbox_lock);
272 	size = ALIGN(size, MBOX_MSG_ALIGN);
273 	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
274 	/* Check if there is space in mailbox */
275 	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
276 		goto exit;
277 	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
278 		goto exit;
279 
280 	if (mdev->msg_size == 0)
281 		mdev->num_msgs = 0;
282 	mdev->num_msgs++;
283 
284 	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
285 
286 	/* Clear the whole msg region */
287 	memset(msghdr, 0, size);
288 	/* Init message header with reset values */
289 	msghdr->ver = OTX2_MBOX_VERSION;
290 	mdev->msg_size += size;
291 	mdev->rsp_size += size_rsp;
292 	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
293 exit:
294 	spin_unlock(&mdev->mbox_lock);
295 
296 	return msghdr;
297 }
298 EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
299 
300 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
301 				      struct mbox_msghdr *msg)
302 {
303 	unsigned long imsg = mbox->tx_start + msgs_offset;
304 	unsigned long irsp = mbox->rx_start + msgs_offset;
305 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
306 	u16 msgs;
307 
308 	spin_lock(&mdev->mbox_lock);
309 
310 	if (mdev->num_msgs != mdev->msgs_acked)
311 		goto error;
312 
313 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
314 		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
315 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
316 
317 		if (msg == pmsg) {
318 			if (pmsg->id != prsp->id)
319 				goto error;
320 			spin_unlock(&mdev->mbox_lock);
321 			return prsp;
322 		}
323 
324 		imsg = mbox->tx_start + pmsg->next_msgoff;
325 		irsp = mbox->rx_start + prsp->next_msgoff;
326 	}
327 
328 error:
329 	spin_unlock(&mdev->mbox_lock);
330 	return ERR_PTR(-ENODEV);
331 }
332 EXPORT_SYMBOL(otx2_mbox_get_rsp);
333 
334 int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
335 {
336 	unsigned long ireq = mbox->tx_start + msgs_offset;
337 	unsigned long irsp = mbox->rx_start + msgs_offset;
338 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
339 	int rc = -ENODEV;
340 	u16 msgs;
341 
342 	spin_lock(&mdev->mbox_lock);
343 
344 	if (mdev->num_msgs != mdev->msgs_acked)
345 		goto exit;
346 
347 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
348 		struct mbox_msghdr *preq = mdev->mbase + ireq;
349 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
350 
351 		if (preq->id != prsp->id) {
352 			trace_otx2_msg_check(mbox->pdev, preq->id,
353 					     prsp->id, prsp->rc);
354 			goto exit;
355 		}
356 		if (prsp->rc) {
357 			rc = prsp->rc;
358 			trace_otx2_msg_check(mbox->pdev, preq->id,
359 					     prsp->id, prsp->rc);
360 			goto exit;
361 		}
362 
363 		ireq = mbox->tx_start + preq->next_msgoff;
364 		irsp = mbox->rx_start + prsp->next_msgoff;
365 	}
366 	rc = 0;
367 exit:
368 	spin_unlock(&mdev->mbox_lock);
369 	return rc;
370 }
371 EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
372 
373 int
374 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
375 {
376 	struct msg_rsp *rsp;
377 
378 	rsp = (struct msg_rsp *)
379 	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
380 	if (!rsp)
381 		return -ENOMEM;
382 	rsp->hdr.id = id;
383 	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
384 	rsp->hdr.rc = MBOX_MSG_INVALID;
385 	rsp->hdr.pcifunc = pcifunc;
386 	return 0;
387 }
388 EXPORT_SYMBOL(otx2_reply_invalid_msg);
389 
390 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
391 {
392 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
393 	bool ret;
394 
395 	spin_lock(&mdev->mbox_lock);
396 	ret = mdev->num_msgs != 0;
397 	spin_unlock(&mdev->mbox_lock);
398 
399 	return ret;
400 }
401 EXPORT_SYMBOL(otx2_mbox_nonempty);
402 
/* Map a mailbox message ID to its human-readable name.
 *
 * Expands the MBOX_MESSAGES X-macro into one "case _id: return "_name";"
 * arm per known message; unknown IDs yield "INVALID ID".
 */
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);
414 
415 MODULE_AUTHOR("Marvell International Ltd.");
416 MODULE_LICENSE("GPL v2");
417