xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/etherdevice.h>
12 #include <linux/of.h>
13 #include <linux/if_vlan.h>
14 #include <linux/iommu.h>
15 #include <net/ip.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <linux/bitfield.h>
19 #include <net/page_pool/types.h>
20 
21 #include "otx2_reg.h"
22 #include "otx2_common.h"
23 #include "otx2_txrx.h"
24 #include "otx2_struct.h"
25 #include "otx2_ptp.h"
26 #include "cn10k.h"
27 #include "qos.h"
28 #include <rvu_trace.h>
29 
30 #define DRV_NAME	"rvu_nicpf"
31 #define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
32 
33 /* Supported devices */
34 static const struct pci_device_id otx2_pf_id_table[] = {
35 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
36 	{ 0, }  /* end of table */
37 };
38 
39 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
40 MODULE_DESCRIPTION(DRV_STRING);
41 MODULE_LICENSE("GPL v2");
42 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
43 
44 static void otx2_vf_link_event_task(struct work_struct *work);
45 
46 enum {
47 	TYPE_PFAF,
48 	TYPE_PFVF,
49 };
50 
51 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
52 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
53 
54 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
55 {
56 	struct otx2_nic *pf = netdev_priv(netdev);
57 	bool if_up = netif_running(netdev);
58 	int err = 0;
59 
60 	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
61 		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
62 			    netdev->mtu);
63 		return -EINVAL;
64 	}
65 	if (if_up)
66 		otx2_stop(netdev);
67 
68 	netdev_info(netdev, "Changing MTU from %d to %d\n",
69 		    netdev->mtu, new_mtu);
70 	WRITE_ONCE(netdev->mtu, new_mtu);
71 
72 	if (if_up)
73 		err = otx2_open(netdev);
74 
75 	return err;
76 }
77 
78 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
79 {
80 	int irq, vfs = pf->total_vfs;
81 
82 	/* Disable VFs ME interrupts */
83 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
84 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
85 	free_irq(irq, pf);
86 
87 	/* Disable VFs FLR interrupts */
88 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
89 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
90 	free_irq(irq, pf);
91 
92 	if (vfs <= 64)
93 		return;
94 
95 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
96 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
97 	free_irq(irq, pf);
98 
99 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
100 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
101 	free_irq(irq, pf);
102 }
103 
104 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
105 {
106 	if (!pf->flr_wq)
107 		return;
108 	destroy_workqueue(pf->flr_wq);
109 	pf->flr_wq = NULL;
110 	devm_kfree(pf->dev, pf->flr_wrk);
111 }
112 
113 static void otx2_flr_handler(struct work_struct *work)
114 {
115 	struct flr_work *flrwork = container_of(work, struct flr_work, work);
116 	struct otx2_nic *pf = flrwork->pf;
117 	struct mbox *mbox = &pf->mbox;
118 	struct msg_req *req;
119 	int vf, reg = 0;
120 
121 	vf = flrwork - pf->flr_wrk;
122 
123 	mutex_lock(&mbox->lock);
124 	req = otx2_mbox_alloc_msg_vf_flr(mbox);
125 	if (!req) {
126 		mutex_unlock(&mbox->lock);
127 		return;
128 	}
129 	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
130 	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
131 
132 	if (!otx2_sync_mbox_msg(&pf->mbox)) {
133 		if (vf >= 64) {
134 			reg = 1;
135 			vf = vf - 64;
136 		}
137 		/* clear transaction pending bit */
138 		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
139 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
140 	}
141 
142 	mutex_unlock(&mbox->lock);
143 }
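
/* Note on the pcifunc fixup in otx2_flr_handler() above: the low
 * RVU_PFVF_FUNC_MASK bits of hdr.pcifunc select the function within this PF,
 * where 0 addresses the PF itself and (vf + 1) addresses VF 'vf'. The same
 * encoding is applied in otx2_pfvf_mbox_handler() further below when VF
 * requests are stamped with the VF's number before being forwarded to the AF.
 */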
144 
145 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
146 {
147 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
148 	int reg, dev, vf, start_vf, num_reg = 1;
149 	u64 intr;
150 
151 	if (pf->total_vfs > 64)
152 		num_reg = 2;
153 
154 	for (reg = 0; reg < num_reg; reg++) {
155 		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
156 		if (!intr)
157 			continue;
158 		start_vf = 64 * reg;
159 		for (vf = 0; vf < 64; vf++) {
160 			if (!(intr & BIT_ULL(vf)))
161 				continue;
162 			dev = vf + start_vf;
163 			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
164 			/* Clear interrupt */
165 			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
166 			/* Disable the interrupt */
167 			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
168 				     BIT_ULL(vf));
169 		}
170 	}
171 	return IRQ_HANDLED;
172 }
173 
174 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
175 {
176 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
177 	int vf, reg, num_reg = 1;
178 	u64 intr;
179 
180 	if (pf->total_vfs > 64)
181 		num_reg = 2;
182 
183 	for (reg = 0; reg < num_reg; reg++) {
184 		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
185 		if (!intr)
186 			continue;
187 		for (vf = 0; vf < 64; vf++) {
188 			if (!(intr & BIT_ULL(vf)))
189 				continue;
190 			/* clear trpend bit */
191 			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
192 			/* clear interrupt */
193 			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
194 		}
195 	}
196 	return IRQ_HANDLED;
197 }
198 
199 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
200 {
201 	struct otx2_hw *hw = &pf->hw;
202 	char *irq_name;
203 	int ret;
204 
205 	/* Register ME interrupt handler*/
206 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
207 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
208 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
209 			  otx2_pf_me_intr_handler, 0, irq_name, pf);
210 	if (ret) {
211 		dev_err(pf->dev,
212 			"RVUPF: IRQ registration failed for ME0\n");
213 	}
214 
215 	/* Register FLR interrupt handler */
216 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
217 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
218 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
219 			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
220 	if (ret) {
221 		dev_err(pf->dev,
222 			"RVUPF: IRQ registration failed for FLR0\n");
223 		return ret;
224 	}
225 
226 	if (numvfs > 64) {
227 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
228 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
229 			 rvu_get_pf(pf->pcifunc));
230 		ret = request_irq(pci_irq_vector
231 				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
232 				  otx2_pf_me_intr_handler, 0, irq_name, pf);
233 		if (ret) {
234 			dev_err(pf->dev,
235 				"RVUPF: IRQ registration failed for ME1\n");
236 		}
237 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
238 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
239 			 rvu_get_pf(pf->pcifunc));
240 		ret = request_irq(pci_irq_vector
241 				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
242 				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
243 		if (ret) {
244 			dev_err(pf->dev,
245 				"RVUPF: IRQ registration failed for FLR1\n");
246 			return ret;
247 		}
248 	}
249 
250 	/* Enable ME interrupt for all VFs*/
251 	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
252 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
253 
254 	/* Enable FLR interrupt for all VFs*/
255 	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
256 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
257 
258 	if (numvfs > 64) {
259 		numvfs -= 64;
260 
261 		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
262 		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
263 			     INTR_MASK(numvfs));
264 
265 		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
266 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
267 			     INTR_MASK(numvfs));
268 	}
269 	return 0;
270 }
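
/* A worked example of the >64 VF split above (a sketch; it assumes INTR_MASK(n)
 * expands to a mask of the low n bits, saturating for n >= 64): with
 * numvfs = 72, ME/FLR register set 0 is enabled with INTR_MASK(72), i.e. all
 * 64 bits covering VFs 0-63, and register set 1 with INTR_MASK(72 - 64), i.e.
 * the low 8 bits covering VFs 64-71. The FLR handler above mirrors this layout
 * by reading RVU_PF_VFFLR_INTX(reg) and offsetting the VF index by
 * start_vf = 64 * reg.
 */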
271 
272 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
273 {
274 	int vf;
275 
276 	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
277 	if (!pf->flr_wq)
278 		return -ENOMEM;
279 
280 	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
281 				   sizeof(struct flr_work), GFP_KERNEL);
282 	if (!pf->flr_wrk) {
283 		destroy_workqueue(pf->flr_wq);
284 		return -ENOMEM;
285 	}
286 
287 	for (vf = 0; vf < num_vfs; vf++) {
288 		pf->flr_wrk[vf].pf = pf;
289 		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
290 	}
291 
292 	return 0;
293 }
294 
295 static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
296 			       int first, int mdevs, u64 intr)
297 {
298 	struct otx2_mbox_dev *mdev;
299 	struct otx2_mbox *mbox;
300 	struct mbox_hdr *hdr;
301 	int i;
302 
303 	for (i = first; i < mdevs; i++) {
304 		/* start from 0 */
305 		if (!(intr & BIT_ULL(i - first)))
306 			continue;
307 
308 		mbox = &mw->mbox;
309 		mdev = &mbox->dev[i];
310 		hdr = mdev->mbase + mbox->rx_start;
311 		/* The hdr->num_msgs is set to zero immediately in the interrupt
312 		 * handler to ensure that it holds a correct value next time
313 		 * when the interrupt handler is called. pf->mw[i].num_msgs
314 		 * holds the data for use in otx2_pfvf_mbox_handler and
315 		 * pf->mw[i].up_num_msgs holds the data for use in
316 		 * otx2_pfvf_mbox_up_handler.
317 		 */
318 		if (hdr->num_msgs) {
319 			mw[i].num_msgs = hdr->num_msgs;
320 			hdr->num_msgs = 0;
321 			queue_work(mbox_wq, &mw[i].mbox_wrk);
322 		}
323 
324 		mbox = &mw->mbox_up;
325 		mdev = &mbox->dev[i];
326 		hdr = mdev->mbase + mbox->rx_start;
327 		if (hdr->num_msgs) {
328 			mw[i].up_num_msgs = hdr->num_msgs;
329 			hdr->num_msgs = 0;
330 			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
331 		}
332 	}
333 }
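
/* The (first, mdevs) pair above maps interrupt bits back to VF mailbox
 * indices: the interrupt handler below passes (0, vfs) for
 * RVU_PF_VFPF_MBOX_INTX(0) (with vfs capped at 64) and (64, total vfs) for
 * RVU_PF_VFPF_MBOX_INTX(1), so bit (i - first) of 'intr' corresponds to
 * mailbox dev[i]; e.g. bit 0 of the second register is VF 64.
 */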
334 
335 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
336 				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
337 				  int devid)
338 {
339 	struct otx2_mbox_dev *src_mdev = mdev;
340 	int offset;
341 
342 	/* Msgs are already copied, trigger VF's mbox irq */
343 	smp_wmb();
344 
345 	otx2_mbox_wait_for_zero(pfvf_mbox, devid);
346 
347 	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
348 	writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
349 
350 	/* Restore VF's mbox bounce buffer region address */
351 	src_mdev->mbase = bbuf_base;
352 }
353 
354 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
355 				     struct otx2_mbox *src_mbox,
356 				     int dir, int vf, int num_msgs)
357 {
358 	struct otx2_mbox_dev *src_mdev, *dst_mdev;
359 	struct mbox_hdr *mbox_hdr;
360 	struct mbox_hdr *req_hdr;
361 	struct mbox *dst_mbox;
362 	int dst_size, err;
363 
364 	if (dir == MBOX_DIR_PFAF) {
365 		/* Set VF's mailbox memory as PF's bounce buffer memory, so
366 		 * that explicit copying of VF's msgs to PF=>AF mbox region
367 		 * and AF=>PF responses to VF's mbox region can be avoided.
368 		 */
369 		src_mdev = &src_mbox->dev[vf];
370 		mbox_hdr = src_mbox->hwbase +
371 				src_mbox->rx_start + (vf * MBOX_SIZE);
372 
373 		dst_mbox = &pf->mbox;
374 		dst_size = dst_mbox->mbox.tx_size -
375 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
376 		/* Check if msgs fit into destination area and have a valid size */
377 		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
378 			return -EINVAL;
379 
380 		dst_mdev = &dst_mbox->mbox.dev[0];
381 
382 		mutex_lock(&pf->mbox.lock);
383 		dst_mdev->mbase = src_mdev->mbase;
384 		dst_mdev->msg_size = mbox_hdr->msg_size;
385 		dst_mdev->num_msgs = num_msgs;
386 		err = otx2_sync_mbox_msg(dst_mbox);
387 		/* Error code -EIO indicates there is a communication failure
388 		 * to the AF. Rest of the error codes indicate that AF processed
389 		 * VF messages and set the error codes in response messages
390 		 * (if any) so simply forward responses to VF.
391 		 */
392 		if (err == -EIO) {
393 			dev_warn(pf->dev,
394 				 "AF not responding to VF%d messages\n", vf);
395 			/* restore PF mbase and exit */
396 			dst_mdev->mbase = pf->mbox.bbuf_base;
397 			mutex_unlock(&pf->mbox.lock);
398 			return err;
399 		}
400 		/* At this point, all the VF messages sent to AF are acked
401 		 * with proper responses and responses are copied to VF
402 		 * mailbox hence raise interrupt to VF.
403 		 */
404 		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
405 					      dst_mbox->mbox.rx_start);
406 		req_hdr->num_msgs = num_msgs;
407 
408 		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
409 				      pf->mbox.bbuf_base, vf);
410 		mutex_unlock(&pf->mbox.lock);
411 	} else if (dir == MBOX_DIR_PFVF_UP) {
412 		src_mdev = &src_mbox->dev[0];
413 		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
414 		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
415 					      src_mbox->rx_start);
416 		req_hdr->num_msgs = num_msgs;
417 
418 		dst_mbox = &pf->mbox_pfvf[0];
419 		dst_size = dst_mbox->mbox_up.tx_size -
420 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
421 		/* Check if msgs fit into destination area */
422 		if (mbox_hdr->msg_size > dst_size)
423 			return -EINVAL;
424 
425 		dst_mdev = &dst_mbox->mbox_up.dev[vf];
426 		dst_mdev->mbase = src_mdev->mbase;
427 		dst_mdev->msg_size = mbox_hdr->msg_size;
428 		dst_mdev->num_msgs = mbox_hdr->num_msgs;
429 		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
430 		if (err) {
431 			dev_warn(pf->dev,
432 				 "VF%d is not responding to mailbox\n", vf);
433 			return err;
434 		}
435 	} else if (dir == MBOX_DIR_VFPF_UP) {
436 		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
437 					      src_mbox->rx_start);
438 		req_hdr->num_msgs = num_msgs;
439 		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
440 				      &pf->mbox.mbox_up,
441 				      pf->mbox_pfvf[vf].bbuf_base,
442 				      0);
443 	}
444 
445 	return 0;
446 }
447 
448 static void otx2_pfvf_mbox_handler(struct work_struct *work)
449 {
450 	struct mbox_msghdr *msg = NULL;
451 	int offset, vf_idx, id, err;
452 	struct otx2_mbox_dev *mdev;
453 	struct otx2_mbox *mbox;
454 	struct mbox *vf_mbox;
455 	struct otx2_nic *pf;
456 
457 	vf_mbox = container_of(work, struct mbox, mbox_wrk);
458 	pf = vf_mbox->pfvf;
459 	vf_idx = vf_mbox - pf->mbox_pfvf;
460 
461 	mbox = &pf->mbox_pfvf[0].mbox;
462 	mdev = &mbox->dev[vf_idx];
463 
464 	offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
465 
466 	for (id = 0; id < vf_mbox->num_msgs; id++) {
467 		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
468 					     offset);
469 
470 		if (msg->sig != OTX2_MBOX_REQ_SIG)
471 			goto inval_msg;
472 
473 		/* Set VF's number in each of the msgs */
474 		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
475 		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
476 		offset = msg->next_msgoff;
477 	}
478 	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
479 					vf_mbox->num_msgs);
480 	if (err)
481 		goto inval_msg;
482 	return;
483 
484 inval_msg:
485 	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
486 	otx2_mbox_msg_send(mbox, vf_idx);
487 }
488 
489 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
490 {
491 	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
492 	struct otx2_nic *pf = vf_mbox->pfvf;
493 	struct otx2_mbox_dev *mdev;
494 	int offset, id, vf_idx = 0;
495 	struct mbox_msghdr *msg;
496 	struct otx2_mbox *mbox;
497 
498 	vf_idx = vf_mbox - pf->mbox_pfvf;
499 	mbox = &pf->mbox_pfvf[0].mbox_up;
500 	mdev = &mbox->dev[vf_idx];
501 
502 	offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
503 
504 	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
505 		msg = mdev->mbase + offset;
506 
507 		if (msg->id >= MBOX_MSG_MAX) {
508 			dev_err(pf->dev,
509 				"Mbox msg with unknown ID 0x%x\n", msg->id);
510 			goto end;
511 		}
512 
513 		if (msg->sig != OTX2_MBOX_RSP_SIG) {
514 			dev_err(pf->dev,
515 				"Mbox msg with wrong signature %x, ID 0x%x\n",
516 				msg->sig, msg->id);
517 			goto end;
518 		}
519 
520 		switch (msg->id) {
521 		case MBOX_MSG_CGX_LINK_EVENT:
522 			break;
523 		default:
524 			if (msg->rc)
525 				dev_err(pf->dev,
526 					"Mbox msg response has err %d, ID 0x%x\n",
527 					msg->rc, msg->id);
528 			break;
529 		}
530 
531 end:
532 		offset = mbox->rx_start + msg->next_msgoff;
533 		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
534 			__otx2_mbox_reset(mbox, vf_idx);
535 		mdev->msgs_acked++;
536 	}
537 }
538 
539 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
540 {
541 	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
542 	int vfs = pf->total_vfs;
543 	struct mbox *mbox;
544 	u64 intr;
545 
546 	mbox = pf->mbox_pfvf;
547 	/* Handle VF interrupts */
548 	if (vfs > 64) {
549 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
550 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
551 		otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
552 		if (intr)
553 			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
554 		vfs = 64;
555 	}
556 
557 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
558 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
559 
560 	otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
561 
562 	if (intr)
563 		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
564 
565 	return IRQ_HANDLED;
566 }
567 
568 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
569 {
570 	void __iomem *hwbase;
571 	struct mbox *mbox;
572 	int err, vf;
573 	u64 base;
574 
575 	if (!numvfs)
576 		return -EINVAL;
577 
578 	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
579 				     sizeof(struct mbox), GFP_KERNEL);
580 	if (!pf->mbox_pfvf)
581 		return -ENOMEM;
582 
583 	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
584 					   WQ_UNBOUND | WQ_HIGHPRI |
585 					   WQ_MEM_RECLAIM, 0);
586 	if (!pf->mbox_pfvf_wq)
587 		return -ENOMEM;
588 
589 	/* On CN10K platform, PF <-> VF mailbox region follows after
590 	 * PF <-> AF mailbox region.
591 	 */
592 	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
593 		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
594 		       MBOX_SIZE;
595 	else
596 		base = readq((void __iomem *)((u64)pf->reg_base +
597 					      RVU_PF_VF_BAR4_ADDR));
598 
599 	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
600 	if (!hwbase) {
601 		err = -ENOMEM;
602 		goto free_wq;
603 	}
604 
605 	mbox = &pf->mbox_pfvf[0];
606 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
607 			     MBOX_DIR_PFVF, numvfs);
608 	if (err)
609 		goto free_iomem;
610 
611 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
612 			     MBOX_DIR_PFVF_UP, numvfs);
613 	if (err)
614 		goto free_iomem;
615 
616 	for (vf = 0; vf < numvfs; vf++) {
617 		mbox->pfvf = pf;
618 		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
619 		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
620 		mbox++;
621 	}
622 
623 	return 0;
624 
625 free_iomem:
626 	if (hwbase)
627 		iounmap(hwbase);
628 free_wq:
629 	destroy_workqueue(pf->mbox_pfvf_wq);
630 	return err;
631 }
632 
633 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
634 {
635 	struct mbox *mbox = &pf->mbox_pfvf[0];
636 
637 	if (!mbox)
638 		return;
639 
640 	if (pf->mbox_pfvf_wq) {
641 		destroy_workqueue(pf->mbox_pfvf_wq);
642 		pf->mbox_pfvf_wq = NULL;
643 	}
644 
645 	if (mbox->mbox.hwbase)
646 		iounmap(mbox->mbox.hwbase);
647 
648 	otx2_mbox_destroy(&mbox->mbox);
649 }
650 
651 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
652 {
653 	/* Clear PF <=> VF mailbox IRQ */
654 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
655 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
656 
657 	/* Enable PF <=> VF mailbox IRQ */
658 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
659 	if (numvfs > 64) {
660 		numvfs -= 64;
661 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
662 			     INTR_MASK(numvfs));
663 	}
664 }
665 
666 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
667 {
668 	int vector;
669 
670 	/* Disable PF <=> VF mailbox IRQ */
671 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
672 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
673 
674 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
675 	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
676 	free_irq(vector, pf);
677 
678 	if (numvfs > 64) {
679 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
680 		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
681 		free_irq(vector, pf);
682 	}
683 }
684 
685 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
686 {
687 	struct otx2_hw *hw = &pf->hw;
688 	char *irq_name;
689 	int err;
690 
691 	/* Register MBOX0 interrupt handler */
692 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
693 	if (pf->pcifunc)
694 		snprintf(irq_name, NAME_SIZE,
695 			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
696 	else
697 		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
698 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
699 			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
700 	if (err) {
701 		dev_err(pf->dev,
702 			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
703 		return err;
704 	}
705 
706 	if (numvfs > 64) {
707 		/* Register MBOX1 interrupt handler */
708 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
709 		if (pf->pcifunc)
710 			snprintf(irq_name, NAME_SIZE,
711 				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
712 		else
713 			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
714 		err = request_irq(pci_irq_vector(pf->pdev,
715 						 RVU_PF_INT_VEC_VFPF_MBOX1),
716 						 otx2_pfvf_mbox_intr_handler,
717 						 0, irq_name, pf);
718 		if (err) {
719 			dev_err(pf->dev,
720 				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
721 			return err;
722 		}
723 	}
724 
725 	otx2_enable_pfvf_mbox_intr(pf, numvfs);
726 
727 	return 0;
728 }
729 
730 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
731 				       struct mbox_msghdr *msg)
732 {
733 	int devid;
734 
735 	if (msg->id >= MBOX_MSG_MAX) {
736 		dev_err(pf->dev,
737 			"Mbox msg with unknown ID 0x%x\n", msg->id);
738 		return;
739 	}
740 
741 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
742 		dev_err(pf->dev,
743 			"Mbox msg with wrong signature %x, ID 0x%x\n",
744 			 msg->sig, msg->id);
745 		return;
746 	}
747 
748 	/* message response heading to a VF */
749 	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
750 	if (devid) {
751 		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
752 		struct delayed_work *dwork;
753 
754 		switch (msg->id) {
755 		case MBOX_MSG_NIX_LF_START_RX:
756 			config->intf_down = false;
757 			dwork = &config->link_event_work;
758 			schedule_delayed_work(dwork, msecs_to_jiffies(100));
759 			break;
760 		case MBOX_MSG_NIX_LF_STOP_RX:
761 			config->intf_down = true;
762 			break;
763 		}
764 
765 		return;
766 	}
767 
768 	switch (msg->id) {
769 	case MBOX_MSG_READY:
770 		pf->pcifunc = msg->pcifunc;
771 		break;
772 	case MBOX_MSG_MSIX_OFFSET:
773 		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
774 		break;
775 	case MBOX_MSG_NPA_LF_ALLOC:
776 		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
777 		break;
778 	case MBOX_MSG_NIX_LF_ALLOC:
779 		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
780 		break;
781 	case MBOX_MSG_NIX_BP_ENABLE:
782 		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
783 		break;
784 	case MBOX_MSG_CGX_STATS:
785 		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
786 		break;
787 	case MBOX_MSG_CGX_FEC_STATS:
788 		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
789 		break;
790 	default:
791 		if (msg->rc)
792 			dev_err(pf->dev,
793 				"Mbox msg response has err %d, ID 0x%x\n",
794 				msg->rc, msg->id);
795 		break;
796 	}
797 }
798 
799 static void otx2_pfaf_mbox_handler(struct work_struct *work)
800 {
801 	struct otx2_mbox_dev *mdev;
802 	struct mbox_hdr *rsp_hdr;
803 	struct mbox_msghdr *msg;
804 	struct otx2_mbox *mbox;
805 	struct mbox *af_mbox;
806 	struct otx2_nic *pf;
807 	int offset, id;
808 	u16 num_msgs;
809 
810 	af_mbox = container_of(work, struct mbox, mbox_wrk);
811 	mbox = &af_mbox->mbox;
812 	mdev = &mbox->dev[0];
813 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
814 	num_msgs = rsp_hdr->num_msgs;
815 
816 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
817 	pf = af_mbox->pfvf;
818 
819 	for (id = 0; id < num_msgs; id++) {
820 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
821 		otx2_process_pfaf_mbox_msg(pf, msg);
822 		offset = mbox->rx_start + msg->next_msgoff;
823 		if (mdev->msgs_acked == (num_msgs - 1))
824 			__otx2_mbox_reset(mbox, 0);
825 		mdev->msgs_acked++;
826 	}
827 
828 }
829 
830 static void otx2_handle_link_event(struct otx2_nic *pf)
831 {
832 	struct cgx_link_user_info *linfo = &pf->linfo;
833 	struct net_device *netdev = pf->netdev;
834 
835 	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
836 		linfo->link_up ? "UP" : "DOWN", linfo->speed,
837 		linfo->full_duplex ? "Full" : "Half");
838 	if (linfo->link_up) {
839 		netif_carrier_on(netdev);
840 		netif_tx_start_all_queues(netdev);
841 	} else {
842 		netif_tx_stop_all_queues(netdev);
843 		netif_carrier_off(netdev);
844 	}
845 }
846 
847 int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
848 					 struct mcs_intr_info *event,
849 					 struct msg_rsp *rsp)
850 {
851 	cn10k_handle_mcs_event(pf, event);
852 
853 	return 0;
854 }
855 
856 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
857 					struct cgx_link_info_msg *msg,
858 					struct msg_rsp *rsp)
859 {
860 	int i;
861 
862 	/* Copy the link info sent by AF */
863 	pf->linfo = msg->link_info;
864 
865 	/* notify VFs about link event */
866 	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
867 		struct otx2_vf_config *config = &pf->vf_configs[i];
868 		struct delayed_work *dwork = &config->link_event_work;
869 
870 		if (config->intf_down)
871 			continue;
872 
873 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
874 	}
875 
876 	/* interface has not been fully configured yet */
877 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
878 		return 0;
879 
880 	otx2_handle_link_event(pf);
881 	return 0;
882 }
883 
884 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
885 				    struct mbox_msghdr *req)
886 {
887 	/* Check if valid, if not reply with an invalid msg */
888 	if (req->sig != OTX2_MBOX_REQ_SIG) {
889 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
890 		return -ENODEV;
891 	}
892 
893 	switch (req->id) {
894 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
895 	case _id: {							\
896 		struct _rsp_type *rsp;					\
897 		int err;						\
898 									\
899 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
900 			&pf->mbox.mbox_up, 0,				\
901 			sizeof(struct _rsp_type));			\
902 		if (!rsp)						\
903 			return -ENOMEM;					\
904 									\
905 		rsp->hdr.id = _id;					\
906 		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
907 		rsp->hdr.pcifunc = 0;					\
908 		rsp->hdr.rc = 0;					\
909 									\
910 		err = otx2_mbox_up_handler_ ## _fn_name(		\
911 			pf, (struct _req_type *)req, rsp);		\
912 		return err;						\
913 	}
914 MBOX_UP_CGX_MESSAGES
915 MBOX_UP_MCS_MESSAGES
916 #undef M
917 		break;
918 	default:
919 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
920 		return -ENODEV;
921 	}
922 	return 0;
923 }
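
/* For one concrete instance of the M() expansion above, here is a sketch of
 * the case generated for the cgx_link_event entry (handler and struct names
 * are taken from otx2_mbox_up_handler_cgx_link_event() earlier in this file;
 * the numeric message ID comes from the MBOX_UP_CGX_MESSAGES list in mbox.h):
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
 *			&pf->mbox.mbox_up, 0, sizeof(struct msg_rsp));
 *		if (!rsp)
 *			return -ENOMEM;
 *
 *		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
 *		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
 *		rsp->hdr.pcifunc = 0;
 *		rsp->hdr.rc = 0;
 *
 *		err = otx2_mbox_up_handler_cgx_link_event(
 *			pf, (struct cgx_link_info_msg *)req, rsp);
 *		return err;
 *	}
 */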
924 
925 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
926 {
927 	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
928 	struct otx2_mbox *mbox = &af_mbox->mbox_up;
929 	struct otx2_mbox_dev *mdev = &mbox->dev[0];
930 	struct otx2_nic *pf = af_mbox->pfvf;
931 	int offset, id, devid = 0;
932 	struct mbox_hdr *rsp_hdr;
933 	struct mbox_msghdr *msg;
934 	u16 num_msgs;
935 
936 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
937 	num_msgs = rsp_hdr->num_msgs;
938 
939 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
940 
941 	for (id = 0; id < num_msgs; id++) {
942 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
943 
944 		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
945 		/* Skip processing VF's messages */
946 		if (!devid)
947 			otx2_process_mbox_msg_up(pf, msg);
948 		offset = mbox->rx_start + msg->next_msgoff;
949 	}
950 	/* Forward to VF iff VFs are really present */
951 	if (devid && pci_num_vf(pf->pdev)) {
952 		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
953 					  MBOX_DIR_PFVF_UP, devid - 1,
954 					  num_msgs);
955 		return;
956 	}
957 
958 	otx2_mbox_msg_send(mbox, 0);
959 }
960 
961 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
962 {
963 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
964 	struct mbox *mw = &pf->mbox;
965 	struct otx2_mbox_dev *mdev;
966 	struct otx2_mbox *mbox;
967 	struct mbox_hdr *hdr;
968 	u64 mbox_data;
969 
970 	/* Clear the IRQ */
971 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
972 
973 
974 	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
975 
976 	if (mbox_data & MBOX_UP_MSG) {
977 		mbox_data &= ~MBOX_UP_MSG;
978 		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
979 
980 		mbox = &mw->mbox_up;
981 		mdev = &mbox->dev[0];
982 		otx2_sync_mbox_bbuf(mbox, 0);
983 
984 		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
985 		if (hdr->num_msgs)
986 			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
987 
988 		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
989 					 BIT_ULL(0));
990 	}
991 
992 	if (mbox_data & MBOX_DOWN_MSG) {
993 		mbox_data &= ~MBOX_DOWN_MSG;
994 		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
995 
996 		mbox = &mw->mbox;
997 		mdev = &mbox->dev[0];
998 		otx2_sync_mbox_bbuf(mbox, 0);
999 
1000 		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
1001 		if (hdr->num_msgs)
1002 			queue_work(pf->mbox_wq, &mw->mbox_wrk);
1003 
1004 		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
1005 					 BIT_ULL(0));
1006 	}
1007 
1008 	return IRQ_HANDLED;
1009 }
1010 
1011 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
1012 {
1013 	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
1014 
1015 	/* Disable AF => PF mailbox IRQ */
1016 	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
1017 	free_irq(vector, pf);
1018 }
1019 
1020 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
1021 {
1022 	struct otx2_hw *hw = &pf->hw;
1023 	struct msg_req *req;
1024 	char *irq_name;
1025 	int err;
1026 
1027 	/* Register mailbox interrupt handler */
1028 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
1029 	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
1030 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
1031 			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
1032 	if (err) {
1033 		dev_err(pf->dev,
1034 			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
1035 		return err;
1036 	}
1037 
1038 	/* Enable mailbox interrupt for msgs coming from AF.
1039 	 * First clear to avoid spurious interrupts, if any.
1040 	 */
1041 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
1042 	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
1043 
1044 	if (!probe_af)
1045 		return 0;
1046 
1047 	/* Check mailbox communication with AF */
1048 	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1049 	if (!req) {
1050 		otx2_disable_mbox_intr(pf);
1051 		return -ENOMEM;
1052 	}
1053 	err = otx2_sync_mbox_msg(&pf->mbox);
1054 	if (err) {
1055 		dev_warn(pf->dev,
1056 			 "AF not responding to mailbox, deferring probe\n");
1057 		otx2_disable_mbox_intr(pf);
1058 		return -EPROBE_DEFER;
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1065 {
1066 	struct mbox *mbox = &pf->mbox;
1067 
1068 	if (pf->mbox_wq) {
1069 		destroy_workqueue(pf->mbox_wq);
1070 		pf->mbox_wq = NULL;
1071 	}
1072 
1073 	if (mbox->mbox.hwbase)
1074 		iounmap((void __iomem *)mbox->mbox.hwbase);
1075 
1076 	otx2_mbox_destroy(&mbox->mbox);
1077 	otx2_mbox_destroy(&mbox->mbox_up);
1078 }
1079 
1080 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1081 {
1082 	struct mbox *mbox = &pf->mbox;
1083 	void __iomem *hwbase;
1084 	int err;
1085 
1086 	mbox->pfvf = pf;
1087 	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
1088 					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
1089 	if (!pf->mbox_wq)
1090 		return -ENOMEM;
1091 
1092 	/* Mailbox is a reserved memory (in RAM) region shared between
1093 	 * admin function (i.e AF) and this PF, shouldn't be mapped as
1094 	 * device memory to allow unaligned accesses.
1095 	 */
1096 	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1097 			    MBOX_SIZE);
1098 	if (!hwbase) {
1099 		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1100 		err = -ENOMEM;
1101 		goto exit;
1102 	}
1103 
1104 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1105 			     MBOX_DIR_PFAF, 1);
1106 	if (err)
1107 		goto exit;
1108 
1109 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1110 			     MBOX_DIR_PFAF_UP, 1);
1111 	if (err)
1112 		goto exit;
1113 
1114 	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1115 	if (err)
1116 		goto exit;
1117 
1118 	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1119 	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1120 	mutex_init(&mbox->lock);
1121 
1122 	return 0;
1123 exit:
1124 	otx2_pfaf_mbox_destroy(pf);
1125 	return err;
1126 }
1127 
1128 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1129 {
1130 	struct msg_req *msg;
1131 	int err;
1132 
1133 	mutex_lock(&pf->mbox.lock);
1134 	if (enable)
1135 		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1136 	else
1137 		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1138 
1139 	if (!msg) {
1140 		mutex_unlock(&pf->mbox.lock);
1141 		return -ENOMEM;
1142 	}
1143 
1144 	err = otx2_sync_mbox_msg(&pf->mbox);
1145 	mutex_unlock(&pf->mbox.lock);
1146 	return err;
1147 }
1148 
1149 int otx2_reset_mac_stats(struct otx2_nic *pfvf)
1150 {
1151 	struct msg_req *req;
1152 	int err;
1153 
1154 	mutex_lock(&pfvf->mbox.lock);
1155 	req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox);
1156 	if (!req) {
1157 		mutex_unlock(&pfvf->mbox.lock);
1158 		return -ENOMEM;
1159 	}
1160 
1161 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1162 	mutex_unlock(&pfvf->mbox.lock);
1163 	return err;
1164 }
1165 
1166 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1167 {
1168 	struct msg_req *msg;
1169 	int err;
1170 
1171 	if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
1172 				    pf->flow_cfg->dmacflt_max_flows))
1173 		netdev_warn(pf->netdev,
1174 			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");
1175 
1176 	mutex_lock(&pf->mbox.lock);
1177 	if (enable)
1178 		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1179 	else
1180 		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1181 
1182 	if (!msg) {
1183 		mutex_unlock(&pf->mbox.lock);
1184 		return -ENOMEM;
1185 	}
1186 
1187 	err = otx2_sync_mbox_msg(&pf->mbox);
1188 	mutex_unlock(&pf->mbox.lock);
1189 	return err;
1190 }
1191 
1192 int otx2_set_real_num_queues(struct net_device *netdev,
1193 			     int tx_queues, int rx_queues)
1194 {
1195 	int err;
1196 
1197 	err = netif_set_real_num_tx_queues(netdev, tx_queues);
1198 	if (err) {
1199 		netdev_err(netdev,
1200 			   "Failed to set no of Tx queues: %d\n", tx_queues);
1201 		return err;
1202 	}
1203 
1204 	err = netif_set_real_num_rx_queues(netdev, rx_queues);
1205 	if (err)
1206 		netdev_err(netdev,
1207 			   "Failed to set no of Rx queues: %d\n", rx_queues);
1208 	return err;
1209 }
1210 EXPORT_SYMBOL(otx2_set_real_num_queues);
1211 
1212 static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
1213 	"NIX_SQOPERR_OOR",
1214 	"NIX_SQOPERR_CTX_FAULT",
1215 	"NIX_SQOPERR_CTX_POISON",
1216 	"NIX_SQOPERR_DISABLED",
1217 	"NIX_SQOPERR_SIZE_ERR",
1218 	"NIX_SQOPERR_OFLOW",
1219 	"NIX_SQOPERR_SQB_NULL",
1220 	"NIX_SQOPERR_SQB_FAULT",
1221 	"NIX_SQOPERR_SQE_SZ_ZERO",
1222 };
1223 
1224 static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
1225 	"NIX_MNQERR_SQ_CTX_FAULT",
1226 	"NIX_MNQERR_SQ_CTX_POISON",
1227 	"NIX_MNQERR_SQB_FAULT",
1228 	"NIX_MNQERR_SQB_POISON",
1229 	"NIX_MNQERR_TOTAL_ERR",
1230 	"NIX_MNQERR_LSO_ERR",
1231 	"NIX_MNQERR_CQ_QUERY_ERR",
1232 	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
1233 	"NIX_MNQERR_MAXLEN_ERR",
1234 	"NIX_MNQERR_SQE_SIZEM1_ZERO",
1235 };
1236 
1237 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
1238 	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
1239 	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
1240 	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
1241 	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
1242 	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
1243 	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
1244 	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
1245 	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
1246 	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
1247 	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
1248 	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
1249 	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
1250 	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
1251 	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
1252 	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
1253 	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
1254 	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
1255 	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
1256 	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
1257 	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
1258 	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
1259 	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
1260 	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
1261 	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
1262 	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
1263 	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
1264 };
1265 
1266 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1267 {
1268 	struct otx2_nic *pf = data;
1269 	struct otx2_snd_queue *sq;
1270 	u64 val, *ptr;
1271 	u64 qidx = 0;
1272 
1273 	/* CQ */
1274 	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1275 		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1276 		val = otx2_atomic64_add((qidx << 44), ptr);
1277 
1278 		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1279 			     (val & NIX_CQERRINT_BITS));
1280 		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1281 			continue;
1282 
1283 		if (val & BIT_ULL(42)) {
1284 			netdev_err(pf->netdev,
1285 				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1286 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1287 		} else {
1288 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1289 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1290 					   qidx);
1291 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1292 				netdev_err(pf->netdev,
1293 					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1294 					   qidx);
1295 		}
1296 
1297 		schedule_work(&pf->reset_task);
1298 	}
1299 
1300 	/* SQ */
1301 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
1302 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
1303 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
1304 
1305 		sq = &pf->qset.sq[qidx];
1306 		if (!sq->sqb_ptrs)
1307 			continue;
1308 
1309 		/* The debug registers below capture the first error corresponding
1310 		 * to each of those registers. We don't have to check against SQ qid as
1311 		 * these are fatal errors.
1312 		 */
1313 
1314 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1315 		val = otx2_atomic64_add((qidx << 44), ptr);
1316 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1317 			     (val & NIX_SQINT_BITS));
1318 
1319 		if (val & BIT_ULL(42)) {
1320 			netdev_err(pf->netdev,
1321 				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1322 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1323 			goto done;
1324 		}
1325 
1326 		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
1327 		if (!(sq_op_err_dbg & BIT(44)))
1328 			goto chk_mnq_err_dbg;
1329 
1330 		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
1331 		netdev_err(pf->netdev,
1332 			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1333 			   qidx, sq_op_err_dbg,
1334 			   nix_sqoperr_e_str[sq_op_err_code],
1335 			   sq_op_err_code);
1336 
1337 		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
1338 
1339 		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
1340 			goto chk_mnq_err_dbg;
1341 
1342 		/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure.
1343 		 * TODO: But we are in irq context. How to call mbox functions which can sleep?
1344 		 */
1345 
1346 chk_mnq_err_dbg:
1347 		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
1348 		if (!(mnq_err_dbg & BIT(44)))
1349 			goto chk_snd_err_dbg;
1350 
1351 		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
1352 		netdev_err(pf->netdev,
1353 			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1354 			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code],
1355 			   mnq_err_code);
1356 		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
1357 
1358 chk_snd_err_dbg:
1359 		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
1360 		if (snd_err_dbg & BIT(44)) {
1361 			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
1362 			netdev_err(pf->netdev,
1363 				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
1364 				   qidx, snd_err_dbg,
1365 				   nix_snd_status_e_str[snd_err_code],
1366 				   snd_err_code);
1367 			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
1368 		}
1369 
1370 done:
1371 		/* Print values and reset */
1372 		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1373 			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1374 				   qidx);
1375 
1376 		schedule_work(&pf->reset_task);
1377 	}
1378 
1379 	return IRQ_HANDLED;
1380 }
1381 
1382 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1383 {
1384 	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1385 	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1386 	int qidx = cq_poll->cint_idx;
1387 
1388 	/* Disable interrupts.
1389 	 *
1390 	 * Completion interrupts behave in a level-triggered fashion, and
1391 	 * hence have to be cleared only after they are serviced.
1392 	 */
1393 	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1394 
1395 	/* Schedule NAPI */
1396 	pf->napi_events++;
1397 	napi_schedule_irqoff(&cq_poll->napi);
1398 
1399 	return IRQ_HANDLED;
1400 }
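
/* The completion interrupt is left masked on return; it is expected to be
 * re-enabled from the NAPI poll routine (otx2_napi_handler, registered in
 * otx2_open() below) once the CQs mapped to this CINT have been serviced,
 * the usual pattern for level-triggered completion interrupts.
 */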
1401 
1402 static void otx2_disable_napi(struct otx2_nic *pf)
1403 {
1404 	struct otx2_qset *qset = &pf->qset;
1405 	struct otx2_cq_poll *cq_poll;
1406 	int qidx;
1407 
1408 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1409 		cq_poll = &qset->napi[qidx];
1410 		cancel_work_sync(&cq_poll->dim.work);
1411 		napi_disable(&cq_poll->napi);
1412 		netif_napi_del(&cq_poll->napi);
1413 	}
1414 }
1415 
1416 static void otx2_free_cq_res(struct otx2_nic *pf)
1417 {
1418 	struct otx2_qset *qset = &pf->qset;
1419 	struct otx2_cq_queue *cq;
1420 	int qidx;
1421 
1422 	/* Disable CQs */
1423 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1424 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1425 		cq = &qset->cq[qidx];
1426 		qmem_free(pf->dev, cq->cqe);
1427 	}
1428 }
1429 
1430 static void otx2_free_sq_res(struct otx2_nic *pf)
1431 {
1432 	struct otx2_qset *qset = &pf->qset;
1433 	struct otx2_snd_queue *sq;
1434 	int qidx;
1435 
1436 	/* Disable SQs */
1437 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1438 	/* Free SQB pointers */
1439 	otx2_sq_free_sqbs(pf);
1440 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
1441 		sq = &qset->sq[qidx];
1442 		/* Skip freeing Qos queues if they are not initialized */
1443 		if (!sq->sqe)
1444 			continue;
1445 		qmem_free(pf->dev, sq->sqe);
1446 		qmem_free(pf->dev, sq->tso_hdrs);
1447 		kfree(sq->sg);
1448 		kfree(sq->sqb_ptrs);
1449 	}
1450 }
1451 
1452 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1453 {
1454 	int frame_size;
1455 	int total_size;
1456 	int rbuf_size;
1457 
1458 	if (pf->hw.rbuf_len)
1459 		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
1460 
1461 	/* The data transferred by NIX to memory consists of actual packet
1462 	 * plus additional data which has timestamp and/or EDSA/HIGIG2
1463 	 * headers if interface is configured in corresponding modes.
1464 	 * NIX transfers entire data using 6 segments/buffers and writes
1465 	 * a CQE_RX descriptor with those segment addresses. First segment
1466 	 * has additional data prepended to the packet. Also, software reserves a
1467 	 * headroom of 128 bytes in each segment. Hence the total size of
1468 	 * memory needed to receive a packet with 'mtu' is:
1469 	 * frame size =  mtu + additional data;
1470 	 * memory = frame_size + headroom * 6;
1471 	 * each receive buffer size = memory / 6;
1472 	 */
1473 	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1474 	total_size = frame_size + OTX2_HEAD_ROOM * 6;
1475 	rbuf_size = total_size / 6;
1476 
1477 	return ALIGN(rbuf_size, 2048);
1478 }
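
/* A rough worked example of the sizing above (the OTX2_ETH_HLEN and
 * OTX2_HW_TIMESTAMP_LEN values used here are assumptions; only the 128-byte
 * headroom is stated in the comment): for a 1500-byte MTU with ~22 bytes of
 * L2/VLAN header and an 8-byte timestamp,
 *	frame_size = 1500 + 22 + 8  = 1530
 *	total_size = 1530 + 128 * 6 = 2298
 *	rbuf_size  = 2298 / 6       = 383
 * and ALIGN(383, 2048) rounds this up to a single 2048-byte receive buffer.
 */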
1479 
1480 static int otx2_init_hw_resources(struct otx2_nic *pf)
1481 {
1482 	struct nix_lf_free_req *free_req;
1483 	struct mbox *mbox = &pf->mbox;
1484 	struct otx2_hw *hw = &pf->hw;
1485 	struct msg_req *req;
1486 	int err = 0, lvl;
1487 
1488 	/* Set required NPA LF's pool counts
1489 	 * Auras and Pools are used in a 1:1 mapping,
1490 	 * so, aura count = pool count.
1491 	 */
1492 	hw->rqpool_cnt = hw->rx_queues;
1493 	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
1494 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1495 
1496 	/* Maximum hardware supported transmit length */
1497 	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
1498 
1499 	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1500 
1501 	mutex_lock(&mbox->lock);
1502 	/* NPA init */
1503 	err = otx2_config_npa(pf);
1504 	if (err)
1505 		goto exit;
1506 
1507 	/* NIX init */
1508 	err = otx2_config_nix(pf);
1509 	if (err)
1510 		goto err_free_npa_lf;
1511 
1512 	/* Enable backpressure for CGX mapped PF/VFs */
1513 	if (!is_otx2_lbkvf(pf->pdev))
1514 		otx2_nix_config_bp(pf, true);
1515 
1516 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1517 	err = otx2_rq_aura_pool_init(pf);
1518 	if (err) {
1519 		mutex_unlock(&mbox->lock);
1520 		goto err_free_nix_lf;
1521 	}
1522 	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
1523 	err = otx2_sq_aura_pool_init(pf);
1524 	if (err) {
1525 		mutex_unlock(&mbox->lock);
1526 		goto err_free_rq_ptrs;
1527 	}
1528 
1529 	err = otx2_txsch_alloc(pf);
1530 	if (err) {
1531 		mutex_unlock(&mbox->lock);
1532 		goto err_free_sq_ptrs;
1533 	}
1534 
1535 #ifdef CONFIG_DCB
1536 	if (pf->pfc_en) {
1537 		err = otx2_pfc_txschq_alloc(pf);
1538 		if (err) {
1539 			mutex_unlock(&mbox->lock);
1540 			goto err_free_sq_ptrs;
1541 		}
1542 	}
1543 #endif
1544 
1545 	err = otx2_config_nix_queues(pf);
1546 	if (err) {
1547 		mutex_unlock(&mbox->lock);
1548 		goto err_free_txsch;
1549 	}
1550 
1551 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1552 		err = otx2_txschq_config(pf, lvl, 0, false);
1553 		if (err) {
1554 			mutex_unlock(&mbox->lock);
1555 			goto err_free_nix_queues;
1556 		}
1557 	}
1558 
1559 #ifdef CONFIG_DCB
1560 	if (pf->pfc_en) {
1561 		err = otx2_pfc_txschq_config(pf);
1562 		if (err) {
1563 			mutex_unlock(&mbox->lock);
1564 			goto err_free_nix_queues;
1565 		}
1566 	}
1567 #endif
1568 
1569 	mutex_unlock(&mbox->lock);
1570 	return err;
1571 
1572 err_free_nix_queues:
1573 	otx2_free_sq_res(pf);
1574 	otx2_free_cq_res(pf);
1575 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1576 err_free_txsch:
1577 	otx2_txschq_stop(pf);
1578 err_free_sq_ptrs:
1579 	otx2_sq_free_sqbs(pf);
1580 err_free_rq_ptrs:
1581 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1582 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1583 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1584 	otx2_aura_pool_free(pf);
1585 err_free_nix_lf:
1586 	mutex_lock(&mbox->lock);
1587 	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1588 	if (free_req) {
1589 		free_req->flags = NIX_LF_DISABLE_FLOWS;
1590 		if (otx2_sync_mbox_msg(mbox))
1591 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1592 	}
1593 err_free_npa_lf:
1594 	/* Reset NPA LF */
1595 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1596 	if (req) {
1597 		if (otx2_sync_mbox_msg(mbox))
1598 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1599 	}
1600 exit:
1601 	mutex_unlock(&mbox->lock);
1602 	return err;
1603 }
1604 
1605 static void otx2_free_hw_resources(struct otx2_nic *pf)
1606 {
1607 	struct otx2_qset *qset = &pf->qset;
1608 	struct nix_lf_free_req *free_req;
1609 	struct mbox *mbox = &pf->mbox;
1610 	struct otx2_cq_queue *cq;
1611 	struct otx2_pool *pool;
1612 	struct msg_req *req;
1613 	int pool_id;
1614 	int qidx;
1615 
1616 	/* Ensure all SQE are processed */
1617 	otx2_sqb_flush(pf);
1618 
1619 	/* Stop transmission */
1620 	otx2_txschq_stop(pf);
1621 
1622 #ifdef CONFIG_DCB
1623 	if (pf->pfc_en)
1624 		otx2_pfc_txschq_stop(pf);
1625 #endif
1626 
1627 	otx2_clean_qos_queues(pf);
1628 
1629 	mutex_lock(&mbox->lock);
1630 	/* Disable backpressure */
1631 	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1632 		otx2_nix_config_bp(pf, false);
1633 	mutex_unlock(&mbox->lock);
1634 
1635 	/* Disable RQs */
1636 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1637 
1638 	/*Dequeue all CQEs */
1639 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1640 		cq = &qset->cq[qidx];
1641 		if (cq->cq_type == CQ_RX)
1642 			otx2_cleanup_rx_cqes(pf, cq, qidx);
1643 		else
1644 			otx2_cleanup_tx_cqes(pf, cq);
1645 	}
1646 	otx2_free_pending_sqe(pf);
1647 
1648 	otx2_free_sq_res(pf);
1649 
1650 	/* Free RQ buffer pointers*/
1651 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1652 
1653 	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
1654 		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
1655 		pool = &pf->qset.pool[pool_id];
1656 		page_pool_destroy(pool->page_pool);
1657 		pool->page_pool = NULL;
1658 	}
1659 
1660 	otx2_free_cq_res(pf);
1661 
1662 	/* Free all ingress bandwidth profiles allocated */
1663 	cn10k_free_all_ipolicers(pf);
1664 
1665 	mutex_lock(&mbox->lock);
1666 	/* Reset NIX LF */
1667 	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1668 	if (free_req) {
1669 		free_req->flags = NIX_LF_DISABLE_FLOWS;
1670 		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1671 			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1672 		if (otx2_sync_mbox_msg(mbox))
1673 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1674 	}
1675 	mutex_unlock(&mbox->lock);
1676 
1677 	/* Disable NPA Pool and Aura hw context */
1678 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1679 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1680 	otx2_aura_pool_free(pf);
1681 
1682 	mutex_lock(&mbox->lock);
1683 	/* Reset NPA LF */
1684 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1685 	if (req) {
1686 		if (otx2_sync_mbox_msg(mbox))
1687 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1688 	}
1689 	mutex_unlock(&mbox->lock);
1690 }
1691 
1692 static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
1693 {
1694 	int vf;
1695 
1696 	/* The AF driver will determine whether to allow the VF netdev or not */
1697 	if (is_otx2_vf(pfvf->pcifunc))
1698 		return true;
1699 
1700 	/* check if there are any trusted VFs associated with the PF netdev */
1701 	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
1702 		if (pfvf->vf_configs[vf].trusted)
1703 			return true;
1704 	return false;
1705 }
1706 
1707 static void otx2_do_set_rx_mode(struct otx2_nic *pf)
1708 {
1709 	struct net_device *netdev = pf->netdev;
1710 	struct nix_rx_mode *req;
1711 	bool promisc = false;
1712 
1713 	if (!(netdev->flags & IFF_UP))
1714 		return;
1715 
1716 	if ((netdev->flags & IFF_PROMISC) ||
1717 	    (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) {
1718 		promisc = true;
1719 	}
1720 
1721 	/* Write unicast address to mcam entries or del from mcam */
1722 	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1723 		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1724 
1725 	mutex_lock(&pf->mbox.lock);
1726 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1727 	if (!req) {
1728 		mutex_unlock(&pf->mbox.lock);
1729 		return;
1730 	}
1731 
1732 	req->mode = NIX_RX_MODE_UCAST;
1733 
1734 	if (promisc)
1735 		req->mode |= NIX_RX_MODE_PROMISC;
1736 	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1737 		req->mode |= NIX_RX_MODE_ALLMULTI;
1738 
1739 	if (otx2_promisc_use_mce_list(pf))
1740 		req->mode |= NIX_RX_MODE_USE_MCE;
1741 
1742 	otx2_sync_mbox_msg(&pf->mbox);
1743 	mutex_unlock(&pf->mbox.lock);
1744 }
1745 
1746 static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
1747 {
1748 	int cint;
1749 
1750 	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
1751 		otx2_config_irq_coalescing(pfvf, cint);
1752 }
1753 
1754 static void otx2_dim_work(struct work_struct *w)
1755 {
1756 	struct dim_cq_moder cur_moder;
1757 	struct otx2_cq_poll *cq_poll;
1758 	struct otx2_nic *pfvf;
1759 	struct dim *dim;
1760 
1761 	dim = container_of(w, struct dim, work);
1762 	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1763 	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
1764 	pfvf = (struct otx2_nic *)cq_poll->dev;
1765 	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
1766 		CQ_TIMER_THRESH_MAX : cur_moder.usec;
1767 	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
1768 		NAPI_POLL_WEIGHT : cur_moder.pkts;
1769 	otx2_set_irq_coalesce(pfvf);
1770 	dim->state = DIM_START_MEASURE;
1771 }
1772 
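/* Bring the interface up: allocate NAPI/CQ/SQ/RQ state, initialize HW
 * resources, register QERR and CQ interrupts, restore timestamping, VLAN
 * and DMAC filter settings, then enable packet I/O and the Rx mode.
 */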
otx2_open(struct net_device * netdev)1773 int otx2_open(struct net_device *netdev)
1774 {
1775 	struct otx2_nic *pf = netdev_priv(netdev);
1776 	struct otx2_cq_poll *cq_poll = NULL;
1777 	struct otx2_qset *qset = &pf->qset;
1778 	int err = 0, qidx, vec;
1779 	char *irq_name;
1780 
1781 	netif_carrier_off(netdev);
1782 
1783 	/* RQ and SQs are mapped to different CQs,
1784 	 * so find out the max CQ IRQs (i.e. CINTs) needed.
1785 	 */
1786 	pf->hw.non_qos_queues =  pf->hw.tx_queues + pf->hw.xdp_queues;
1787 	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
1788 			       pf->hw.tc_tx_queues);
1789 
1790 	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
1791 
1792 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1793 	if (!qset->napi)
1794 		return -ENOMEM;
1795 
1796 	/* CQ size of RQ */
1797 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1798 	/* CQ size of SQ */
1799 	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1800 
1801 	err = -ENOMEM;
1802 	qset->cq = kcalloc(pf->qset.cq_cnt,
1803 			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
1804 	if (!qset->cq)
1805 		goto err_free_mem;
1806 
1807 	qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
1808 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
1809 	if (!qset->sq)
1810 		goto err_free_mem;
1811 
1812 	qset->rq = kcalloc(pf->hw.rx_queues,
1813 			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1814 	if (!qset->rq)
1815 		goto err_free_mem;
1816 
1817 	err = otx2_init_hw_resources(pf);
1818 	if (err)
1819 		goto err_free_mem;
1820 
1821 	/* Register NAPI handler */
1822 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1823 		cq_poll = &qset->napi[qidx];
1824 		cq_poll->cint_idx = qidx;
1825 		/* RQ0 & SQ0 are mapped to CINT0 and so on..
1826 		 * 'cq_ids[0]' points to RQ's CQ,
1827 		 * 'cq_ids[1]' points to SQ's CQ and
1828 		 * 'cq_ids[2]' points to XDP's CQ.
1829 		 */
1830 		cq_poll->cq_ids[CQ_RX] =
1831 			(qidx <  pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1832 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1833 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1834 		if (pf->xdp_prog)
1835 			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
1836 						  (qidx + pf->hw.rx_queues +
1837 						  pf->hw.tx_queues) :
1838 						  CINT_INVALID_CQ;
1839 		else
1840 			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
1841 
1842 		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
1843 					  (qidx + pf->hw.rx_queues +
1844 					   pf->hw.non_qos_queues) :
1845 					  CINT_INVALID_CQ;
1846 
1847 		cq_poll->dev = (void *)pf;
1848 		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1849 		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
1850 		netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
1851 		napi_enable(&cq_poll->napi);
1852 	}
1853 
1854 	/* Set maximum frame size allowed in HW */
1855 	err = otx2_hw_set_mtu(pf, netdev->mtu);
1856 	if (err)
1857 		goto err_disable_napi;
1858 
1859 	/* Setup segmentation algorithms, if failed, clear offload capability */
1860 	otx2_setup_segmentation(pf);
1861 	/* Set up segmentation algorithms; if it fails, clear the offload capability */
1862 	/* Initialize RSS */
1863 	err = otx2_rss_init(pf);
1864 	if (err)
1865 		goto err_disable_napi;
1866 
1867 	/* Register Queue IRQ handlers */
1868 	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1869 	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1870 
1871 	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1872 
1873 	err = request_irq(pci_irq_vector(pf->pdev, vec),
1874 			  otx2_q_intr_handler, 0, irq_name, pf);
1875 	if (err) {
1876 		dev_err(pf->dev,
1877 			"RVUPF%d: IRQ registration failed for QERR\n",
1878 			rvu_get_pf(pf->pcifunc));
1879 		goto err_disable_napi;
1880 	}
1881 
1882 	/* Enable QINT IRQ */
1883 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1884 
1885 	/* Register CQ IRQ handlers */
1886 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1887 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1888 		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1889 		int name_len;
1890 
1891 		name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
1892 				    pf->netdev->name, qidx);
1893 		if (name_len >= NAME_SIZE) {
1894 			dev_err(pf->dev,
1895 				"RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
1896 				rvu_get_pf(pf->pcifunc), qidx);
1897 			err = -EINVAL;
1898 			goto err_free_cints;
1899 		}
1900 
1901 		err = request_irq(pci_irq_vector(pf->pdev, vec),
1902 				  otx2_cq_intr_handler, 0, irq_name,
1903 				  &qset->napi[qidx]);
1904 		if (err) {
1905 			dev_err(pf->dev,
1906 				"RVUPF%d: IRQ registration failed for CQ%d\n",
1907 				rvu_get_pf(pf->pcifunc), qidx);
1908 			goto err_free_cints;
1909 		}
1910 		vec++;
1911 
1912 		otx2_config_irq_coalescing(pf, qidx);
1913 
1914 		/* Enable CQ IRQ */
1915 		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1916 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1917 	}
1918 
1919 	otx2_set_cints_affinity(pf);
1920 
1921 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1922 		otx2_enable_rxvlan(pf, true);
1923 
1924 	/* When reinitializing, re-enable timestamping if it was enabled before */
1925 	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1926 		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1927 		otx2_config_hw_tx_tstamp(pf, true);
1928 	}
1929 	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1930 		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1931 		otx2_config_hw_rx_tstamp(pf, true);
1932 	}
1933 
1934 	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
1935 	/* 'intf_down' may be checked on any cpu */
1936 	smp_wmb();
1937 
1938 	/* Enable QoS configuration before starting tx queues */
1939 	otx2_qos_config_txschq(pf);
1940 
1941 	/* we have already received link status notification */
1942 	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1943 		otx2_handle_link_event(pf);
1944 
1945 	/* Install DMAC Filters */
1946 	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
1947 		otx2_dmacflt_reinstall_flows(pf);
1948 
1949 	otx2_tc_apply_ingress_police_rules(pf);
1950 
1951 	err = otx2_rxtx_enable(pf, true);
1952 	/* If a mbox communication error happens at this point then interface
1953 	 * will end up in a state such that it is in down state but hardware
1954 	 * mcam entries are enabled to receive the packets. Hence disable the
1955 	 * packet I/O.
1956 	 */
1957 	if (err == -EIO)
1958 		goto err_disable_rxtx;
1959 	else if (err)
1960 		goto err_tx_stop_queues;
1961 
1962 	otx2_do_set_rx_mode(pf);
1963 
1964 	return 0;
1965 
1966 err_disable_rxtx:
1967 	otx2_rxtx_enable(pf, false);
1968 err_tx_stop_queues:
1969 	netif_tx_stop_all_queues(netdev);
1970 	netif_carrier_off(netdev);
1971 	pf->flags |= OTX2_FLAG_INTF_DOWN;
1972 err_free_cints:
1973 	otx2_free_cints(pf, qidx);
1974 	vec = pci_irq_vector(pf->pdev,
1975 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1976 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1977 	free_irq(vec, pf);
1978 err_disable_napi:
1979 	otx2_disable_napi(pf);
1980 	otx2_free_hw_resources(pf);
1981 err_free_mem:
1982 	kfree(qset->sq);
1983 	kfree(qset->cq);
1984 	kfree(qset->rq);
1985 	kfree(qset->napi);
1986 	return err;
1987 }
1988 EXPORT_SYMBOL(otx2_open);
1989 
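/* Tear down the interface: stop packet Rx/Tx, free queue and CQ interrupts,
 * NAPI contexts and HW resources, and reset queue bookkeeping while
 * preserving the configured RQ/SQ ring sizes.
 */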
otx2_stop(struct net_device * netdev)1990 int otx2_stop(struct net_device *netdev)
1991 {
1992 	struct otx2_nic *pf = netdev_priv(netdev);
1993 	struct otx2_cq_poll *cq_poll = NULL;
1994 	struct otx2_qset *qset = &pf->qset;
1995 	struct otx2_rss_info *rss;
1996 	int qidx, vec, wrk;
1997 
1998 	/* If the DOWN flag is set resources are already freed */
1999 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
2000 		return 0;
2001 
2002 	netif_carrier_off(netdev);
2003 	netif_tx_stop_all_queues(netdev);
2004 
2005 	pf->flags |= OTX2_FLAG_INTF_DOWN;
2006 	/* 'intf_down' may be checked on any cpu */
2007 	smp_wmb();
2008 
2009 	/* First stop packet Rx/Tx */
2010 	otx2_rxtx_enable(pf, false);
2011 
2012 	/* Clear RSS enable flag */
2013 	rss = &pf->hw.rss_info;
2014 	rss->enable = false;
2015 	if (!netif_is_rxfh_configured(netdev))
2016 		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
2017 
2018 	/* Cleanup Queue IRQ */
2019 	vec = pci_irq_vector(pf->pdev,
2020 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
2021 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
2022 	free_irq(vec, pf);
2023 
2024 	/* Cleanup CQ NAPI and IRQ */
2025 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
2026 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
2027 		/* Disable interrupt */
2028 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
2029 
2030 		synchronize_irq(pci_irq_vector(pf->pdev, vec));
2031 
2032 		cq_poll = &qset->napi[qidx];
2033 		napi_synchronize(&cq_poll->napi);
2034 		vec++;
2035 	}
2036 
2037 	netif_tx_disable(netdev);
2038 
2039 	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
2040 		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
2041 	devm_kfree(pf->dev, pf->refill_wrk);
2042 
2043 	otx2_free_hw_resources(pf);
2044 	otx2_free_cints(pf, pf->hw.cint_cnt);
2045 	otx2_disable_napi(pf);
2046 
2047 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
2048 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
2049 
2050 
2051 	kfree(qset->sq);
2052 	kfree(qset->cq);
2053 	kfree(qset->rq);
2054 	kfree(qset->napi);
2055 	/* Do not clear RQ/SQ ringsize settings */
2056 	memset_startat(qset, 0, sqe_cnt);
2057 	return 0;
2058 }
2059 EXPORT_SYMBOL(otx2_stop);
2060 
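/* Transmit path: map the selected TXQ to its send queue (skipping the XDP
 * SQs), drop runt or oversized frames, and stop the queue when SQB space
 * runs low, waking it again once enough space is available.
 */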
otx2_xmit(struct sk_buff * skb,struct net_device * netdev)2061 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
2062 {
2063 	struct otx2_nic *pf = netdev_priv(netdev);
2064 	int qidx = skb_get_queue_mapping(skb);
2065 	struct otx2_snd_queue *sq;
2066 	struct netdev_queue *txq;
2067 	int sq_idx;
2068 
2069 	/* XDP SQs are not mapped to TXQs;
2070 	 * advance qidx to derive the correct SQ mapped with QoS
2071 	 */
2072 	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
2073 
2074 	/* Check for minimum and maximum packet length */
2075 	if (skb->len <= ETH_HLEN ||
2076 	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
2077 		dev_kfree_skb(skb);
2078 		return NETDEV_TX_OK;
2079 	}
2080 
2081 	sq = &pf->qset.sq[sq_idx];
2082 	txq = netdev_get_tx_queue(netdev, qidx);
2083 
2084 	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
2085 		netif_tx_stop_queue(txq);
2086 
2087 		/* Check again, in case SQBs got freed up */
2088 		smp_mb();
2089 		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
2090 							> sq->sqe_thresh)
2091 			netif_tx_wake_queue(txq);
2092 
2093 		return NETDEV_TX_BUSY;
2094 	}
2095 
2096 	return NETDEV_TX_OK;
2097 }
2098 
otx2_qos_select_htb_queue(struct otx2_nic * pf,struct sk_buff * skb,u16 htb_maj_id)2099 static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
2100 				     u16 htb_maj_id)
2101 {
2102 	u16 classid;
2103 
2104 	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
2105 		classid = TC_H_MIN(skb->priority);
2106 	else
2107 		classid = READ_ONCE(pf->qos.defcls);
2108 
2109 	if (!classid)
2110 		return 0;
2111 
2112 	return otx2_get_txq_by_classid(pf, classid);
2113 }
2114 
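/* Queue selection: prefer an HTB offload (QoS) queue when one matches the
 * skb classid, then a PFC queue based on VLAN priority, and finally the
 * stack's default pick clamped to the regular TX queue range.
 */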
otx2_select_queue(struct net_device * netdev,struct sk_buff * skb,struct net_device * sb_dev)2115 u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
2116 		      struct net_device *sb_dev)
2117 {
2118 	struct otx2_nic *pf = netdev_priv(netdev);
2119 	bool qos_enabled;
2120 #ifdef CONFIG_DCB
2121 	u8 vlan_prio;
2122 #endif
2123 	int txq;
2124 
2125 	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
2126 	if (unlikely(qos_enabled)) {
2127 		/* This smp_load_acquire() pairs with smp_store_release() in
2128 		 * otx2_qos_root_add() called from htb offload root creation
2129 		 */
2130 		u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);
2131 
2132 		if (unlikely(htb_maj_id)) {
2133 			txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);
2134 			if (txq > 0)
2135 				return txq;
2136 			goto process_pfc;
2137 		}
2138 	}
2139 
2140 process_pfc:
2141 #ifdef CONFIG_DCB
2142 	if (!skb_vlan_tag_present(skb))
2143 		goto pick_tx;
2144 
2145 	vlan_prio = skb->vlan_tci >> 13;
2146 	if ((vlan_prio > pf->hw.tx_queues - 1) ||
2147 	    !pf->pfc_alloc_status[vlan_prio])
2148 		goto pick_tx;
2149 
2150 	return vlan_prio;
2151 
2152 pick_tx:
2153 #endif
2154 	txq = netdev_pick_tx(netdev, skb, NULL);
2155 	if (unlikely(qos_enabled))
2156 		return txq % pf->hw.tx_queues;
2157 
2158 	return txq;
2159 }
2160 EXPORT_SYMBOL(otx2_select_queue);
2161 
otx2_fix_features(struct net_device * dev,netdev_features_t features)2162 static netdev_features_t otx2_fix_features(struct net_device *dev,
2163 					   netdev_features_t features)
2164 {
2165 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2166 		features |= NETIF_F_HW_VLAN_STAG_RX;
2167 	else
2168 		features &= ~NETIF_F_HW_VLAN_STAG_RX;
2169 
2170 	return features;
2171 }
2172 
otx2_set_rx_mode(struct net_device * netdev)2173 static void otx2_set_rx_mode(struct net_device *netdev)
2174 {
2175 	struct otx2_nic *pf = netdev_priv(netdev);
2176 
2177 	queue_work(pf->otx2_wq, &pf->rx_mode_work);
2178 }
2179 
otx2_rx_mode_wrk_handler(struct work_struct * work)2180 static void otx2_rx_mode_wrk_handler(struct work_struct *work)
2181 {
2182 	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
2183 
2184 	otx2_do_set_rx_mode(pf);
2185 }
2186 
otx2_set_features(struct net_device * netdev,netdev_features_t features)2187 static int otx2_set_features(struct net_device *netdev,
2188 			     netdev_features_t features)
2189 {
2190 	netdev_features_t changed = features ^ netdev->features;
2191 	struct otx2_nic *pf = netdev_priv(netdev);
2192 
2193 	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
2194 		return otx2_cgx_config_loopback(pf,
2195 						features & NETIF_F_LOOPBACK);
2196 
2197 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
2198 		return otx2_enable_rxvlan(pf,
2199 					  features & NETIF_F_HW_VLAN_CTAG_RX);
2200 
2201 	return otx2_handle_ntuple_tc_features(netdev, features);
2202 }
2203 
otx2_reset_task(struct work_struct * work)2204 static void otx2_reset_task(struct work_struct *work)
2205 {
2206 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
2207 
2208 	if (!netif_running(pf->netdev))
2209 		return;
2210 
2211 	rtnl_lock();
2212 	otx2_stop(pf->netdev);
2213 	pf->reset_count++;
2214 	otx2_open(pf->netdev);
2215 	netif_trans_update(pf->netdev);
2216 	rtnl_unlock();
2217 }
2218 
otx2_config_hw_rx_tstamp(struct otx2_nic * pfvf,bool enable)2219 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
2220 {
2221 	struct msg_req *req;
2222 	int err;
2223 
2224 	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
2225 		return 0;
2226 
2227 	mutex_lock(&pfvf->mbox.lock);
2228 	if (enable)
2229 		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
2230 	else
2231 		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
2232 	if (!req) {
2233 		mutex_unlock(&pfvf->mbox.lock);
2234 		return -ENOMEM;
2235 	}
2236 
2237 	err = otx2_sync_mbox_msg(&pfvf->mbox);
2238 	if (err) {
2239 		mutex_unlock(&pfvf->mbox.lock);
2240 		return err;
2241 	}
2242 
2243 	mutex_unlock(&pfvf->mbox.lock);
2244 	if (enable)
2245 		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
2246 	else
2247 		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
2248 	return 0;
2249 }
2250 
otx2_config_hw_tx_tstamp(struct otx2_nic * pfvf,bool enable)2251 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
2252 {
2253 	struct msg_req *req;
2254 	int err;
2255 
2256 	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
2257 		return 0;
2258 
2259 	mutex_lock(&pfvf->mbox.lock);
2260 	if (enable)
2261 		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
2262 	else
2263 		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
2264 	if (!req) {
2265 		mutex_unlock(&pfvf->mbox.lock);
2266 		return -ENOMEM;
2267 	}
2268 
2269 	err = otx2_sync_mbox_msg(&pfvf->mbox);
2270 	if (err) {
2271 		mutex_unlock(&pfvf->mbox.lock);
2272 		return err;
2273 	}
2274 
2275 	mutex_unlock(&pfvf->mbox.lock);
2276 	if (enable)
2277 		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
2278 	else
2279 		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
2280 	return 0;
2281 }
2282 
otx2_config_hwtstamp(struct net_device * netdev,struct ifreq * ifr)2283 int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
2284 {
2285 	struct otx2_nic *pfvf = netdev_priv(netdev);
2286 	struct hwtstamp_config config;
2287 
2288 	if (!pfvf->ptp)
2289 		return -ENODEV;
2290 
2291 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2292 		return -EFAULT;
2293 
2294 	switch (config.tx_type) {
2295 	case HWTSTAMP_TX_OFF:
2296 		if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
2297 			pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
2298 
2299 		cancel_delayed_work(&pfvf->ptp->synctstamp_work);
2300 		otx2_config_hw_tx_tstamp(pfvf, false);
2301 		break;
2302 	case HWTSTAMP_TX_ONESTEP_SYNC:
2303 		if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
2304 			return -ERANGE;
2305 		pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
2306 		schedule_delayed_work(&pfvf->ptp->synctstamp_work,
2307 				      msecs_to_jiffies(500));
2308 		fallthrough;
2309 	case HWTSTAMP_TX_ON:
2310 		otx2_config_hw_tx_tstamp(pfvf, true);
2311 		break;
2312 	default:
2313 		return -ERANGE;
2314 	}
2315 
2316 	switch (config.rx_filter) {
2317 	case HWTSTAMP_FILTER_NONE:
2318 		otx2_config_hw_rx_tstamp(pfvf, false);
2319 		break;
2320 	case HWTSTAMP_FILTER_ALL:
2321 	case HWTSTAMP_FILTER_SOME:
2322 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2323 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2324 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2325 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2326 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2327 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2328 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2329 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2330 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2331 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2332 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2333 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2334 		otx2_config_hw_rx_tstamp(pfvf, true);
2335 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2336 		break;
2337 	default:
2338 		return -ERANGE;
2339 	}
2340 
2341 	memcpy(&pfvf->tstamp, &config, sizeof(config));
2342 
2343 	return copy_to_user(ifr->ifr_data, &config,
2344 			    sizeof(config)) ? -EFAULT : 0;
2345 }
2346 EXPORT_SYMBOL(otx2_config_hwtstamp);
2347 
otx2_ioctl(struct net_device * netdev,struct ifreq * req,int cmd)2348 int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2349 {
2350 	struct otx2_nic *pfvf = netdev_priv(netdev);
2351 	struct hwtstamp_config *cfg = &pfvf->tstamp;
2352 
2353 	switch (cmd) {
2354 	case SIOCSHWTSTAMP:
2355 		return otx2_config_hwtstamp(netdev, req);
2356 	case SIOCGHWTSTAMP:
2357 		return copy_to_user(req->ifr_data, cfg,
2358 				    sizeof(*cfg)) ? -EFAULT : 0;
2359 	default:
2360 		return -EOPNOTSUPP;
2361 	}
2362 }
2363 EXPORT_SYMBOL(otx2_ioctl);
2364 
otx2_do_set_vf_mac(struct otx2_nic * pf,int vf,const u8 * mac)2365 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2366 {
2367 	struct npc_install_flow_req *req;
2368 	int err;
2369 
2370 	mutex_lock(&pf->mbox.lock);
2371 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2372 	if (!req) {
2373 		err = -ENOMEM;
2374 		goto out;
2375 	}
2376 
2377 	ether_addr_copy(req->packet.dmac, mac);
2378 	eth_broadcast_addr((u8 *)&req->mask.dmac);
2379 	req->features = BIT_ULL(NPC_DMAC);
2380 	req->channel = pf->hw.rx_chan_base;
2381 	req->intf = NIX_INTF_RX;
2382 	req->default_rule = 1;
2383 	req->append = 1;
2384 	req->vf = vf + 1;
2385 	req->op = NIX_RX_ACTION_DEFAULT;
2386 
2387 	err = otx2_sync_mbox_msg(&pf->mbox);
2388 out:
2389 	mutex_unlock(&pf->mbox.lock);
2390 	return err;
2391 }
2392 
otx2_set_vf_mac(struct net_device * netdev,int vf,u8 * mac)2393 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2394 {
2395 	struct otx2_nic *pf = netdev_priv(netdev);
2396 	struct pci_dev *pdev = pf->pdev;
2397 	struct otx2_vf_config *config;
2398 	int ret;
2399 
2400 	if (!netif_running(netdev))
2401 		return -EAGAIN;
2402 
2403 	if (vf >= pf->total_vfs)
2404 		return -EINVAL;
2405 
2406 	if (!is_valid_ether_addr(mac))
2407 		return -EINVAL;
2408 
2409 	config = &pf->vf_configs[vf];
2410 	ether_addr_copy(config->mac, mac);
2411 
2412 	ret = otx2_do_set_vf_mac(pf, vf, mac);
2413 	if (ret == 0)
2414 		dev_info(&pdev->dev,
2415 			 "Load/Reload VF driver\n");
2416 
2417 	return ret;
2418 }
2419 
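/* Program (or remove) a VF VLAN: free any old Tx VTAG entry, then either
 * delete the per-VF Rx/Tx MCAM flows or install new ones that strip the
 * VLAN on Rx and insert it on Tx.
 */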
otx2_do_set_vf_vlan(struct otx2_nic * pf,int vf,u16 vlan,u8 qos,__be16 proto)2420 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
2421 			       __be16 proto)
2422 {
2423 	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
2424 	struct nix_vtag_config_rsp *vtag_rsp;
2425 	struct npc_delete_flow_req *del_req;
2426 	struct nix_vtag_config *vtag_req;
2427 	struct npc_install_flow_req *req;
2428 	struct otx2_vf_config *config;
2429 	int err = 0;
2430 	u32 idx;
2431 
2432 	config = &pf->vf_configs[vf];
2433 
2434 	if (!vlan && !config->vlan)
2435 		goto out;
2436 
2437 	mutex_lock(&pf->mbox.lock);
2438 
2439 	/* free old tx vtag entry */
2440 	if (config->vlan) {
2441 		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2442 		if (!vtag_req) {
2443 			err = -ENOMEM;
2444 			goto out;
2445 		}
2446 		vtag_req->cfg_type = 0;
2447 		vtag_req->tx.free_vtag0 = 1;
2448 		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
2449 
2450 		err = otx2_sync_mbox_msg(&pf->mbox);
2451 		if (err)
2452 			goto out;
2453 	}
2454 
2455 	if (!vlan && config->vlan) {
2456 		/* rx */
2457 		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2458 		if (!del_req) {
2459 			err = -ENOMEM;
2460 			goto out;
2461 		}
2462 		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2463 		del_req->entry =
2464 			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2465 		err = otx2_sync_mbox_msg(&pf->mbox);
2466 		if (err)
2467 			goto out;
2468 
2469 		/* tx */
2470 		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2471 		if (!del_req) {
2472 			err = -ENOMEM;
2473 			goto out;
2474 		}
2475 		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2476 		del_req->entry =
2477 			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2478 		err = otx2_sync_mbox_msg(&pf->mbox);
2479 
2480 		goto out;
2481 	}
2482 
2483 	/* rx */
2484 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2485 	if (!req) {
2486 		err = -ENOMEM;
2487 		goto out;
2488 	}
2489 
2490 	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2491 	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2492 	req->packet.vlan_tci = htons(vlan);
2493 	req->mask.vlan_tci = htons(VLAN_VID_MASK);
2494 	/* af fills the destination mac addr */
2495 	eth_broadcast_addr((u8 *)&req->mask.dmac);
2496 	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2497 	req->channel = pf->hw.rx_chan_base;
2498 	req->intf = NIX_INTF_RX;
2499 	req->vf = vf + 1;
2500 	req->op = NIX_RX_ACTION_DEFAULT;
2501 	req->vtag0_valid = true;
2502 	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2503 	req->set_cntr = 1;
2504 
2505 	err = otx2_sync_mbox_msg(&pf->mbox);
2506 	if (err)
2507 		goto out;
2508 
2509 	/* tx */
2510 	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2511 	if (!vtag_req) {
2512 		err = -ENOMEM;
2513 		goto out;
2514 	}
2515 
2516 	/* configure tx vtag params */
2517 	vtag_req->vtag_size = VTAGSIZE_T4;
2518 	vtag_req->cfg_type = 0; /* tx vlan cfg */
2519 	vtag_req->tx.cfg_vtag0 = 1;
2520 	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2521 
2522 	err = otx2_sync_mbox_msg(&pf->mbox);
2523 	if (err)
2524 		goto out;
2525 
2526 	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2527 			(&pf->mbox.mbox, 0, &vtag_req->hdr);
2528 	if (IS_ERR(vtag_rsp)) {
2529 		err = PTR_ERR(vtag_rsp);
2530 		goto out;
2531 	}
2532 	config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2533 
2534 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2535 	if (!req) {
2536 		err = -ENOMEM;
2537 		goto out;
2538 	}
2539 
2540 	eth_zero_addr((u8 *)&req->mask.dmac);
2541 	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2542 	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2543 	req->features = BIT_ULL(NPC_DMAC);
2544 	req->channel = pf->hw.tx_chan_base;
2545 	req->intf = NIX_INTF_TX;
2546 	req->vf = vf + 1;
2547 	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2548 	req->vtag0_def = vtag_rsp->vtag0_idx;
2549 	req->vtag0_op = VTAG_INSERT;
2550 	req->set_cntr = 1;
2551 
2552 	err = otx2_sync_mbox_msg(&pf->mbox);
2553 out:
2554 	config->vlan = vlan;
2555 	mutex_unlock(&pf->mbox.lock);
2556 	return err;
2557 }
2558 
otx2_set_vf_vlan(struct net_device * netdev,int vf,u16 vlan,u8 qos,__be16 proto)2559 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
2560 			    __be16 proto)
2561 {
2562 	struct otx2_nic *pf = netdev_priv(netdev);
2563 	struct pci_dev *pdev = pf->pdev;
2564 
2565 	if (!netif_running(netdev))
2566 		return -EAGAIN;
2567 
2568 	if (vf >= pci_num_vf(pdev))
2569 		return -EINVAL;
2570 
2571 	/* qos is currently unsupported */
2572 	if (vlan >= VLAN_N_VID || qos)
2573 		return -EINVAL;
2574 
2575 	if (proto != htons(ETH_P_8021Q))
2576 		return -EPROTONOSUPPORT;
2577 
2578 	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
2579 		return -EOPNOTSUPP;
2580 
2581 	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
2582 }
2583 
otx2_get_vf_config(struct net_device * netdev,int vf,struct ifla_vf_info * ivi)2584 static int otx2_get_vf_config(struct net_device *netdev, int vf,
2585 			      struct ifla_vf_info *ivi)
2586 {
2587 	struct otx2_nic *pf = netdev_priv(netdev);
2588 	struct pci_dev *pdev = pf->pdev;
2589 	struct otx2_vf_config *config;
2590 
2591 	if (!netif_running(netdev))
2592 		return -EAGAIN;
2593 
2594 	if (vf >= pci_num_vf(pdev))
2595 		return -EINVAL;
2596 
2597 	config = &pf->vf_configs[vf];
2598 	ivi->vf = vf;
2599 	ether_addr_copy(ivi->mac, config->mac);
2600 	ivi->vlan = config->vlan;
2601 	ivi->trusted = config->trusted;
2602 
2603 	return 0;
2604 }
2605 
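/* Map an XDP frame for DMA and append it to the XDP send queue; on failure
 * to append, unmap the buffer and release the page.
 */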
otx2_xdp_xmit_tx(struct otx2_nic * pf,struct xdp_frame * xdpf,int qidx)2606 static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
2607 			    int qidx)
2608 {
2609 	struct page *page;
2610 	u64 dma_addr;
2611 	int err = 0;
2612 
2613 	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
2614 				     offset_in_page(xdpf->data), xdpf->len,
2615 				     DMA_TO_DEVICE);
2616 	if (dma_mapping_error(pf->dev, dma_addr))
2617 		return -ENOMEM;
2618 
2619 	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
2620 	if (!err) {
2621 		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
2622 		page = virt_to_page(xdpf->data);
2623 		put_page(page);
2624 		return -ENOMEM;
2625 	}
2626 	return 0;
2627 }
2628 
otx2_xdp_xmit(struct net_device * netdev,int n,struct xdp_frame ** frames,u32 flags)2629 static int otx2_xdp_xmit(struct net_device *netdev, int n,
2630 			 struct xdp_frame **frames, u32 flags)
2631 {
2632 	struct otx2_nic *pf = netdev_priv(netdev);
2633 	int qidx = smp_processor_id();
2634 	struct otx2_snd_queue *sq;
2635 	int drops = 0, i;
2636 
2637 	if (!netif_running(netdev))
2638 		return -ENETDOWN;
2639 
2640 	qidx += pf->hw.tx_queues;
2641 	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
2642 
2643 	/* Abort xmit if the XDP queue is not initialized */
2644 	if (unlikely(!sq))
2645 		return -ENXIO;
2646 
2647 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2648 		return -EINVAL;
2649 
2650 	for (i = 0; i < n; i++) {
2651 		struct xdp_frame *xdpf = frames[i];
2652 		int err;
2653 
2654 		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
2655 		if (err)
2656 			drops++;
2657 	}
2658 	return n - drops;
2659 }
2660 
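/* Attach or detach an XDP program: reject MTUs above MAX_XDP_MTU, swap the
 * program under a stop/open cycle and size the dedicated XDP TX queues.
 */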
otx2_xdp_setup(struct otx2_nic * pf,struct bpf_prog * prog)2661 static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
2662 {
2663 	struct net_device *dev = pf->netdev;
2664 	bool if_up = netif_running(pf->netdev);
2665 	struct bpf_prog *old_prog;
2666 
2667 	if (prog && dev->mtu > MAX_XDP_MTU) {
2668 		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
2669 		return -EOPNOTSUPP;
2670 	}
2671 
2672 	if (if_up)
2673 		otx2_stop(pf->netdev);
2674 
2675 	old_prog = xchg(&pf->xdp_prog, prog);
2676 
2677 	if (old_prog)
2678 		bpf_prog_put(old_prog);
2679 
2680 	if (pf->xdp_prog)
2681 		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
2682 
2683 	/* Network stack and XDP share the same Rx queues.
2684 	 * Use separate tx queues for XDP and network stack.
2685 	 */
2686 	if (pf->xdp_prog) {
2687 		pf->hw.xdp_queues = pf->hw.rx_queues;
2688 		xdp_features_set_redirect_target(dev, false);
2689 	} else {
2690 		pf->hw.xdp_queues = 0;
2691 		xdp_features_clear_redirect_target(dev);
2692 	}
2693 
2694 	if (if_up)
2695 		otx2_open(pf->netdev);
2696 
2697 	return 0;
2698 }
2699 
otx2_xdp(struct net_device * netdev,struct netdev_bpf * xdp)2700 static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2701 {
2702 	struct otx2_nic *pf = netdev_priv(netdev);
2703 
2704 	switch (xdp->command) {
2705 	case XDP_SETUP_PROG:
2706 		return otx2_xdp_setup(pf, xdp->prog);
2707 	default:
2708 		return -EINVAL;
2709 	}
2710 }
2711 
otx2_set_vf_permissions(struct otx2_nic * pf,int vf,int req_perm)2712 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2713 				   int req_perm)
2714 {
2715 	struct set_vf_perm *req;
2716 	int rc;
2717 
2718 	mutex_lock(&pf->mbox.lock);
2719 	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
2720 	if (!req) {
2721 		rc = -ENOMEM;
2722 		goto out;
2723 	}
2724 
2725 	/* Let AF reset VF permissions as sriov is disabled */
2726 	if (req_perm == OTX2_RESET_VF_PERM) {
2727 		req->flags |= RESET_VF_PERM;
2728 	} else if (req_perm == OTX2_TRUSTED_VF) {
2729 		if (pf->vf_configs[vf].trusted)
2730 			req->flags |= VF_TRUSTED;
2731 	}
2732 
2733 	req->vf = vf;
2734 	rc = otx2_sync_mbox_msg(&pf->mbox);
2735 out:
2736 	mutex_unlock(&pf->mbox.lock);
2737 	return rc;
2738 }
2739 
otx2_ndo_set_vf_trust(struct net_device * netdev,int vf,bool enable)2740 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
2741 				 bool enable)
2742 {
2743 	struct otx2_nic *pf = netdev_priv(netdev);
2744 	struct pci_dev *pdev = pf->pdev;
2745 	int rc;
2746 
2747 	if (vf >= pci_num_vf(pdev))
2748 		return -EINVAL;
2749 
2750 	if (pf->vf_configs[vf].trusted == enable)
2751 		return 0;
2752 
2753 	pf->vf_configs[vf].trusted = enable;
2754 	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
2755 
2756 	if (rc) {
2757 		pf->vf_configs[vf].trusted = !enable;
2758 	} else {
2759 		netdev_info(pf->netdev, "VF %d is %strusted\n",
2760 			    vf, enable ? "" : "not ");
2761 		otx2_set_rx_mode(netdev);
2762 	}
2763 
2764 	return rc;
2765 }
2766 
2767 static const struct net_device_ops otx2_netdev_ops = {
2768 	.ndo_open		= otx2_open,
2769 	.ndo_stop		= otx2_stop,
2770 	.ndo_start_xmit		= otx2_xmit,
2771 	.ndo_select_queue	= otx2_select_queue,
2772 	.ndo_fix_features	= otx2_fix_features,
2773 	.ndo_set_mac_address    = otx2_set_mac_address,
2774 	.ndo_change_mtu		= otx2_change_mtu,
2775 	.ndo_set_rx_mode	= otx2_set_rx_mode,
2776 	.ndo_set_features	= otx2_set_features,
2777 	.ndo_tx_timeout		= otx2_tx_timeout,
2778 	.ndo_get_stats64	= otx2_get_stats64,
2779 	.ndo_eth_ioctl		= otx2_ioctl,
2780 	.ndo_set_vf_mac		= otx2_set_vf_mac,
2781 	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
2782 	.ndo_get_vf_config	= otx2_get_vf_config,
2783 	.ndo_bpf		= otx2_xdp,
2784 	.ndo_xdp_xmit           = otx2_xdp_xmit,
2785 	.ndo_setup_tc		= otx2_setup_tc,
2786 	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
2787 };
2788 
otx2_wq_init(struct otx2_nic * pf)2789 static int otx2_wq_init(struct otx2_nic *pf)
2790 {
2791 	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2792 	if (!pf->otx2_wq)
2793 		return -ENOMEM;
2794 
2795 	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
2796 	INIT_WORK(&pf->reset_task, otx2_reset_task);
2797 	return 0;
2798 }
2799 
otx2_check_pf_usable(struct otx2_nic * nic)2800 static int otx2_check_pf_usable(struct otx2_nic *nic)
2801 {
2802 	u64 rev;
2803 
2804 	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
2805 	rev = (rev >> 12) & 0xFF;
2806 	/* Check if AF has set up the revision for the RVUM block,
2807 	 * otherwise this driver probe should be deferred
2808 	 * until AF driver comes up.
2809 	 */
2810 	if (!rev) {
2811 		dev_warn(nic->dev,
2812 			 "AF is not initialized, deferring probe\n");
2813 		return -EPROBE_DEFER;
2814 	}
2815 	return 0;
2816 }
2817 
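/* After the NIX/NPA LFs are attached, reallocate MSI-X vectors so enough
 * are available for the NIX queue and completion interrupts, then
 * re-register the AF mailbox interrupt.
 */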
otx2_realloc_msix_vectors(struct otx2_nic * pf)2818 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
2819 {
2820 	struct otx2_hw *hw = &pf->hw;
2821 	int num_vec, err;
2822 
2823 	/* NPA interrupts are not registered, so alloc only
2824 	 * up to the NIX vector offset.
2825 	 */
2826 	num_vec = hw->nix_msixoff;
2827 	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
2828 
2829 	otx2_disable_mbox_intr(pf);
2830 	pci_free_irq_vectors(hw->pdev);
2831 	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
2832 	if (err < 0) {
2833 		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
2834 			__func__, num_vec);
2835 		return err;
2836 	}
2837 
2838 	return otx2_register_mbox_intr(pf, false);
2839 }
2840 
otx2_sriov_vfcfg_init(struct otx2_nic * pf)2841 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2842 {
2843 	int i;
2844 
2845 	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2846 				      sizeof(struct otx2_vf_config),
2847 				      GFP_KERNEL);
2848 	if (!pf->vf_configs)
2849 		return -ENOMEM;
2850 
2851 	for (i = 0; i < pf->total_vfs; i++) {
2852 		pf->vf_configs[i].pf = pf;
2853 		pf->vf_configs[i].intf_down = true;
2854 		pf->vf_configs[i].trusted = false;
2855 		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2856 				  otx2_vf_link_event_task);
2857 	}
2858 
2859 	return 0;
2860 }
2861 
otx2_sriov_vfcfg_cleanup(struct otx2_nic * pf)2862 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2863 {
2864 	int i;
2865 
2866 	if (!pf->vf_configs)
2867 		return;
2868 
2869 	for (i = 0; i < pf->total_vfs; i++) {
2870 		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2871 		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2872 	}
2873 }
2874 
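/* PCI probe: map CSRs, set up the AF mailbox, attach NIX/NPA LFs, configure
 * netdev features and register the netdev along with ethtool, devlink, TC,
 * DCB and QoS support.
 */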
otx2_probe(struct pci_dev * pdev,const struct pci_device_id * id)2875 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2876 {
2877 	struct device *dev = &pdev->dev;
2878 	int err, qcount, qos_txqs;
2879 	struct net_device *netdev;
2880 	struct otx2_nic *pf;
2881 	struct otx2_hw *hw;
2882 	int num_vec;
2883 
2884 	err = pcim_enable_device(pdev);
2885 	if (err) {
2886 		dev_err(dev, "Failed to enable PCI device\n");
2887 		return err;
2888 	}
2889 
2890 	err = pci_request_regions(pdev, DRV_NAME);
2891 	if (err) {
2892 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
2893 		return err;
2894 	}
2895 
2896 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2897 	if (err) {
2898 		dev_err(dev, "DMA mask config failed, abort\n");
2899 		goto err_release_regions;
2900 	}
2901 
2902 	pci_set_master(pdev);
2903 
2904 	/* Set number of queues */
2905 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
2906 	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
2907 
2908 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
2909 	if (!netdev) {
2910 		err = -ENOMEM;
2911 		goto err_release_regions;
2912 	}
2913 
2914 	pci_set_drvdata(pdev, netdev);
2915 	SET_NETDEV_DEV(netdev, &pdev->dev);
2916 	pf = netdev_priv(netdev);
2917 	pf->netdev = netdev;
2918 	pf->pdev = pdev;
2919 	pf->dev = dev;
2920 	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
2921 	pf->flags |= OTX2_FLAG_INTF_DOWN;
2922 
2923 	hw = &pf->hw;
2924 	hw->pdev = pdev;
2925 	hw->rx_queues = qcount;
2926 	hw->tx_queues = qcount;
2927 	hw->non_qos_queues = qcount;
2928 	hw->max_queues = qcount;
2929 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
2930 	/* Use CQE of 128 byte descriptor size by default */
2931 	hw->xqe_size = 128;
2932 
2933 	num_vec = pci_msix_vec_count(pdev);
2934 	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
2935 					  GFP_KERNEL);
2936 	if (!hw->irq_name) {
2937 		err = -ENOMEM;
2938 		goto err_free_netdev;
2939 	}
2940 
2941 	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
2942 					 sizeof(cpumask_var_t), GFP_KERNEL);
2943 	if (!hw->affinity_mask) {
2944 		err = -ENOMEM;
2945 		goto err_free_netdev;
2946 	}
2947 
2948 	/* Map CSRs */
2949 	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
2950 	if (!pf->reg_base) {
2951 		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
2952 		err = -ENOMEM;
2953 		goto err_free_netdev;
2954 	}
2955 
2956 	err = otx2_check_pf_usable(pf);
2957 	if (err)
2958 		goto err_free_netdev;
2959 
2960 	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
2961 				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
2962 	if (err < 0) {
2963 		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
2964 			__func__, num_vec);
2965 		goto err_free_netdev;
2966 	}
2967 
2968 	otx2_setup_dev_hw_settings(pf);
2969 
2970 	/* Init PF <=> AF mailbox stuff */
2971 	err = otx2_pfaf_mbox_init(pf);
2972 	if (err)
2973 		goto err_free_irq_vectors;
2974 
2975 	/* Register mailbox interrupt */
2976 	err = otx2_register_mbox_intr(pf, true);
2977 	if (err)
2978 		goto err_mbox_destroy;
2979 
2980 	/* Request AF to attach NPA and NIX LFs to this PF.
2981 	 * NIX and NPA LFs are needed for this PF to function as a NIC.
2982 	 */
2983 	err = otx2_attach_npa_nix(pf);
2984 	if (err)
2985 		goto err_disable_mbox_intr;
2986 
2987 	err = otx2_realloc_msix_vectors(pf);
2988 	if (err)
2989 		goto err_detach_rsrc;
2990 
2991 	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
2992 	if (err)
2993 		goto err_detach_rsrc;
2994 
2995 	err = cn10k_lmtst_init(pf);
2996 	if (err)
2997 		goto err_detach_rsrc;
2998 
2999 	/* Assign default mac address */
3000 	otx2_get_mac_from_af(netdev);
3001 
3002 	/* Don't check for error.  Proceed without ptp */
3003 	otx2_ptp_init(pf);
3004 
3005 	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
3006 	 * HW allocates buffer pointer from stack and uses it for DMA'ing
3007 	 * ingress packet. In some scenarios HW can free back allocated buffer
3008 	 * pointers to pool. This makes it impossible for SW to maintain a
3009 	 * parallel list where physical addresses of buffer pointers (IOVAs)
3010 	 * given to HW can be saved for later reference.
3011 	 *
3012 	 * So the only way to convert Rx packet's buffer address is to use
3013 	 * IOMMU's iova_to_phys() handler which translates the address by
3014 	 * walking through the translation tables.
3015 	 */
3016 	pf->iommu_domain = iommu_get_domain_for_dev(dev);
3017 
3018 	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3019 			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
3020 			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3021 			       NETIF_F_GSO_UDP_L4);
3022 	netdev->features |= netdev->hw_features;
3023 
3024 	err = otx2_mcam_flow_init(pf);
3025 	if (err)
3026 		goto err_ptp_destroy;
3027 
3028 	err = cn10k_mcs_init(pf);
3029 	if (err)
3030 		goto err_del_mcam_entries;
3031 
3032 	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
3033 		netdev->hw_features |= NETIF_F_NTUPLE;
3034 
3035 	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
3036 		netdev->priv_flags |= IFF_UNICAST_FLT;
3037 
3038 	/* Support TSO on tag interface */
3039 	netdev->vlan_features |= netdev->features;
3040 	netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
3041 				NETIF_F_HW_VLAN_STAG_TX;
3042 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
3043 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
3044 				       NETIF_F_HW_VLAN_STAG_RX;
3045 	netdev->features |= netdev->hw_features;
3046 
3047 	/* HW supports tc offload but mutually exclusive with n-tuple filters */
3048 	/* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
3049 		netdev->hw_features |= NETIF_F_HW_TC;
3050 
3051 	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
3052 
3053 	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
3054 	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
3055 
3056 	netdev->netdev_ops = &otx2_netdev_ops;
3057 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
3058 
3059 	netdev->min_mtu = OTX2_MIN_MTU;
3060 	netdev->max_mtu = otx2_get_max_mtu(pf);
3061 
3062 	/* reset CGX/RPM MAC stats */
3063 	otx2_reset_mac_stats(pf);
3064 
3065 	err = register_netdev(netdev);
3066 	if (err) {
3067 		dev_err(dev, "Failed to register netdevice\n");
3068 		goto err_mcs_free;
3069 	}
3070 
3071 	err = otx2_wq_init(pf);
3072 	if (err)
3073 		goto err_unreg_netdev;
3074 
3075 	otx2_set_ethtool_ops(netdev);
3076 
3077 	err = otx2_init_tc(pf);
3078 	if (err)
3079 		goto err_mcam_flow_del;
3080 
3081 	err = otx2_register_dl(pf);
3082 	if (err)
3083 		goto err_mcam_flow_del;
3084 
3085 	/* Initialize SR-IOV resources */
3086 	err = otx2_sriov_vfcfg_init(pf);
3087 	if (err)
3088 		goto err_pf_sriov_init;
3089 
3090 	/* Enable link notifications */
3091 	otx2_cgx_config_linkevents(pf, true);
3092 
3093 #ifdef CONFIG_DCB
3094 	err = otx2_dcbnl_set_ops(netdev);
3095 	if (err)
3096 		goto err_pf_sriov_init;
3097 #endif
3098 
3099 	otx2_qos_init(pf, qos_txqs);
3100 
3101 	return 0;
3102 
3103 err_pf_sriov_init:
3104 	otx2_shutdown_tc(pf);
3105 err_mcam_flow_del:
3106 	otx2_mcam_flow_del(pf);
3107 err_unreg_netdev:
3108 	unregister_netdev(netdev);
3109 err_mcs_free:
3110 	cn10k_mcs_free(pf);
3111 err_del_mcam_entries:
3112 	otx2_mcam_flow_del(pf);
3113 err_ptp_destroy:
3114 	otx2_ptp_destroy(pf);
3115 err_detach_rsrc:
3116 	if (pf->hw.lmt_info)
3117 		free_percpu(pf->hw.lmt_info);
3118 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
3119 		qmem_free(pf->dev, pf->dync_lmt);
3120 	otx2_detach_resources(&pf->mbox);
3121 err_disable_mbox_intr:
3122 	otx2_disable_mbox_intr(pf);
3123 err_mbox_destroy:
3124 	otx2_pfaf_mbox_destroy(pf);
3125 err_free_irq_vectors:
3126 	pci_free_irq_vectors(hw->pdev);
3127 err_free_netdev:
3128 	pci_set_drvdata(pdev, NULL);
3129 	free_netdev(netdev);
3130 err_release_regions:
3131 	pci_release_regions(pdev);
3132 	return err;
3133 }
3134 
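/* Deferred work that forwards the PF's cached link state to a VF over the
 * PF-VF mailbox, rescheduling itself if the up-channel is still busy.
 */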
otx2_vf_link_event_task(struct work_struct * work)3135 static void otx2_vf_link_event_task(struct work_struct *work)
3136 {
3137 	struct otx2_vf_config *config;
3138 	struct cgx_link_info_msg *req;
3139 	struct mbox_msghdr *msghdr;
3140 	struct delayed_work *dwork;
3141 	struct otx2_nic *pf;
3142 	int vf_idx;
3143 
3144 	config = container_of(work, struct otx2_vf_config,
3145 			      link_event_work.work);
3146 	vf_idx = config - config->pf->vf_configs;
3147 	pf = config->pf;
3148 
3149 	if (config->intf_down)
3150 		return;
3151 
3152 	mutex_lock(&pf->mbox.lock);
3153 
3154 	dwork = &config->link_event_work;
3155 
3156 	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
3157 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
3158 		mutex_unlock(&pf->mbox.lock);
3159 		return;
3160 	}
3161 
3162 	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
3163 					 sizeof(*req), sizeof(struct msg_rsp));
3164 	if (!msghdr) {
3165 		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
3166 		mutex_unlock(&pf->mbox.lock);
3167 		return;
3168 	}
3169 
3170 	req = (struct cgx_link_info_msg *)msghdr;
3171 	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
3172 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
3173 	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
3174 
3175 	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
3176 
3177 	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
3178 
3179 	mutex_unlock(&pf->mbox.lock);
3180 }
3181 
otx2_sriov_enable(struct pci_dev * pdev,int numvfs)3182 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
3183 {
3184 	struct net_device *netdev = pci_get_drvdata(pdev);
3185 	struct otx2_nic *pf = netdev_priv(netdev);
3186 	int ret;
3187 
3188 	/* Init PF <=> VF mailbox stuff */
3189 	ret = otx2_pfvf_mbox_init(pf, numvfs);
3190 	if (ret)
3191 		return ret;
3192 
3193 	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
3194 	if (ret)
3195 		goto free_mbox;
3196 
3197 	ret = otx2_pf_flr_init(pf, numvfs);
3198 	if (ret)
3199 		goto free_intr;
3200 
3201 	ret = otx2_register_flr_me_intr(pf, numvfs);
3202 	if (ret)
3203 		goto free_flr;
3204 
3205 	ret = pci_enable_sriov(pdev, numvfs);
3206 	if (ret)
3207 		goto free_flr_intr;
3208 
3209 	return numvfs;
3210 free_flr_intr:
3211 	otx2_disable_flr_me_intr(pf);
3212 free_flr:
3213 	otx2_flr_wq_destroy(pf);
3214 free_intr:
3215 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
3216 free_mbox:
3217 	otx2_pfvf_mbox_destroy(pf);
3218 	return ret;
3219 }
3220 
otx2_sriov_disable(struct pci_dev * pdev)3221 static int otx2_sriov_disable(struct pci_dev *pdev)
3222 {
3223 	struct net_device *netdev = pci_get_drvdata(pdev);
3224 	struct otx2_nic *pf = netdev_priv(netdev);
3225 	int numvfs = pci_num_vf(pdev);
3226 
3227 	if (!numvfs)
3228 		return 0;
3229 
3230 	pci_disable_sriov(pdev);
3231 
3232 	otx2_disable_flr_me_intr(pf);
3233 	otx2_flr_wq_destroy(pf);
3234 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
3235 	otx2_pfvf_mbox_destroy(pf);
3236 
3237 	return 0;
3238 }
3239 
otx2_sriov_configure(struct pci_dev * pdev,int numvfs)3240 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
3241 {
3242 	if (numvfs == 0)
3243 		return otx2_sriov_disable(pdev);
3244 	else
3245 		return otx2_sriov_enable(pdev, numvfs);
3246 }
3247 
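/* Ask the AF to sync NDC-cached NIX and NPA contexts back to memory before
 * the LFs are detached.
 */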
otx2_ndc_sync(struct otx2_nic * pf)3248 static void otx2_ndc_sync(struct otx2_nic *pf)
3249 {
3250 	struct mbox *mbox = &pf->mbox;
3251 	struct ndc_sync_op *req;
3252 
3253 	mutex_lock(&mbox->lock);
3254 
3255 	req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
3256 	if (!req) {
3257 		mutex_unlock(&mbox->lock);
3258 		return;
3259 	}
3260 
3261 	req->nix_lf_tx_sync = 1;
3262 	req->nix_lf_rx_sync = 1;
3263 	req->npa_lf_sync = 1;
3264 
3265 	if (otx2_sync_mbox_msg(mbox))
3266 		dev_err(pf->dev, "NDC sync operation failed\n");
3267 
3268 	mutex_unlock(&mbox->lock);
3269 }
3270 
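/* Device teardown: disable timestamping, pause/PFC settings and link events,
 * unregister devlink and the netdev, release SR-IOV, PTP, MCAM, TC and QoS
 * state, then detach the LFs and free mailbox, IRQ and PCI resources.
 */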
otx2_remove(struct pci_dev * pdev)3271 static void otx2_remove(struct pci_dev *pdev)
3272 {
3273 	struct net_device *netdev = pci_get_drvdata(pdev);
3274 	struct otx2_nic *pf;
3275 
3276 	if (!netdev)
3277 		return;
3278 
3279 	pf = netdev_priv(netdev);
3280 
3281 	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
3282 
3283 	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
3284 		otx2_config_hw_tx_tstamp(pf, false);
3285 	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
3286 		otx2_config_hw_rx_tstamp(pf, false);
3287 
3288 	/* Disable 802.3x pause frames */
3289 	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
3290 	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
3291 		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
3292 		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
3293 		otx2_config_pause_frm(pf);
3294 	}
3295 
3296 #ifdef CONFIG_DCB
3297 	/* Disable PFC config */
3298 	if (pf->pfc_en) {
3299 		pf->pfc_en = 0;
3300 		otx2_config_priority_flow_ctrl(pf);
3301 	}
3302 #endif
3303 	cancel_work_sync(&pf->reset_task);
3304 	/* Disable link notifications */
3305 	otx2_cgx_config_linkevents(pf, false);
3306 
3307 	otx2_unregister_dl(pf);
3308 	unregister_netdev(netdev);
3309 	cn10k_mcs_free(pf);
3310 	otx2_sriov_disable(pf->pdev);
3311 	otx2_sriov_vfcfg_cleanup(pf);
3312 	if (pf->otx2_wq)
3313 		destroy_workqueue(pf->otx2_wq);
3314 
3315 	otx2_ptp_destroy(pf);
3316 	otx2_mcam_flow_del(pf);
3317 	otx2_shutdown_tc(pf);
3318 	otx2_shutdown_qos(pf);
3319 	otx2_ndc_sync(pf);
3320 	otx2_detach_resources(&pf->mbox);
3321 	if (pf->hw.lmt_info)
3322 		free_percpu(pf->hw.lmt_info);
3323 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
3324 		qmem_free(pf->dev, pf->dync_lmt);
3325 	otx2_disable_mbox_intr(pf);
3326 	otx2_pfaf_mbox_destroy(pf);
3327 	pci_free_irq_vectors(pf->pdev);
3328 	pci_set_drvdata(pdev, NULL);
3329 	free_netdev(netdev);
3330 
3331 	pci_release_regions(pdev);
3332 }
3333 
3334 static struct pci_driver otx2_pf_driver = {
3335 	.name = DRV_NAME,
3336 	.id_table = otx2_pf_id_table,
3337 	.probe = otx2_probe,
3338 	.shutdown = otx2_remove,
3339 	.remove = otx2_remove,
3340 	.sriov_configure = otx2_sriov_configure
3341 };
3342 
otx2_rvupf_init_module(void)3343 static int __init otx2_rvupf_init_module(void)
3344 {
3345 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3346 
3347 	return pci_register_driver(&otx2_pf_driver);
3348 }
3349 
otx2_rvupf_cleanup_module(void)3350 static void __exit otx2_rvupf_cleanup_module(void)
3351 {
3352 	pci_unregister_driver(&otx2_pf_driver);
3353 }
3354 
3355 module_init(otx2_rvupf_init_module);
3356 module_exit(otx2_rvupf_cleanup_module);
3357