xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/etherdevice.h>
12 #include <linux/of.h>
13 #include <linux/if_vlan.h>
14 #include <linux/iommu.h>
15 #include <net/ip.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <linux/bitfield.h>
19 #include <net/page_pool/types.h>
20 
21 #include "otx2_reg.h"
22 #include "otx2_common.h"
23 #include "otx2_txrx.h"
24 #include "otx2_struct.h"
25 #include "otx2_ptp.h"
26 #include "cn10k.h"
27 #include "qos.h"
28 #include <rvu_trace.h>
29 
30 #define DRV_NAME	"rvu_nicpf"
31 #define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
32 
33 /* Supported devices */
34 static const struct pci_device_id otx2_pf_id_table[] = {
35 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
36 	{ 0, }  /* end of table */
37 };
38 
39 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
40 MODULE_DESCRIPTION(DRV_STRING);
41 MODULE_LICENSE("GPL v2");
42 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
43 
44 static void otx2_vf_link_event_task(struct work_struct *work);
45 
46 enum {
47 	TYPE_PFAF,
48 	TYPE_PFVF,
49 };
50 
51 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
52 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
53 
54 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
55 {
56 	struct otx2_nic *pf = netdev_priv(netdev);
57 	bool if_up = netif_running(netdev);
58 	int err = 0;
59 
60 	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
61 		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
62 			    netdev->mtu);
63 		return -EINVAL;
64 	}
65 	if (if_up)
66 		otx2_stop(netdev);
67 
68 	netdev_info(netdev, "Changing MTU from %d to %d\n",
69 		    netdev->mtu, new_mtu);
70 	WRITE_ONCE(netdev->mtu, new_mtu);
71 
72 	if (if_up)
73 		err = otx2_open(netdev);
74 
75 	return err;
76 }
77 
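/* Note: FLR and ME interrupt state for up to 128 VFs is spread across two
 * 64-bit registers; index 0 covers VFs 0-63 and index 1 covers VFs 64-127.
 * INTR_MASK(n) is assumed to expand to a mask of the low 'n' bits (e.g.
 * INTR_MASK(4) == 0xf), so the writes below clear the enable bits for
 * exactly 'total_vfs' VFs before their IRQ vectors are freed.
 */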
78 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
79 {
80 	int irq, vfs = pf->total_vfs;
81 
82 	/* Disable VFs ME interrupts */
83 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
84 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
85 	free_irq(irq, pf);
86 
87 	/* Disable VFs FLR interrupts */
88 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
89 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
90 	free_irq(irq, pf);
91 
92 	if (vfs <= 64)
93 		return;
94 
95 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
96 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
97 	free_irq(irq, pf);
98 
99 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
100 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
101 	free_irq(irq, pf);
102 }
103 
104 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
105 {
106 	if (!pf->flr_wq)
107 		return;
108 	destroy_workqueue(pf->flr_wq);
109 	pf->flr_wq = NULL;
110 	devm_kfree(pf->dev, pf->flr_wrk);
111 }
112 
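/* FLR (Function Level Reset) handling: otx2_pf_flr_intr_handler() queues one
 * work item per VF that triggered an FLR. The work item below forwards a
 * VF_FLR mailbox request to the AF so that it can release the VF's resources,
 * then clears the VF's transaction-pending bit and re-enables the FLR
 * interrupt that the irq handler masked while the reset was being serviced.
 */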
113 static void otx2_flr_handler(struct work_struct *work)
114 {
115 	struct flr_work *flrwork = container_of(work, struct flr_work, work);
116 	struct otx2_nic *pf = flrwork->pf;
117 	struct mbox *mbox = &pf->mbox;
118 	struct msg_req *req;
119 	int vf, reg = 0;
120 
121 	vf = flrwork - pf->flr_wrk;
122 
123 	mutex_lock(&mbox->lock);
124 	req = otx2_mbox_alloc_msg_vf_flr(mbox);
125 	if (!req) {
126 		mutex_unlock(&mbox->lock);
127 		return;
128 	}
129 	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
130 	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
131 
132 	if (!otx2_sync_mbox_msg(&pf->mbox)) {
133 		if (vf >= 64) {
134 			reg = 1;
135 			vf = vf - 64;
136 		}
137 		/* clear transaction pending bit */
138 		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
139 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
140 	}
141 
142 	mutex_unlock(&mbox->lock);
143 }
144 
145 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
146 {
147 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
148 	int reg, dev, vf, start_vf, num_reg = 1;
149 	u64 intr;
150 
151 	if (pf->total_vfs > 64)
152 		num_reg = 2;
153 
154 	for (reg = 0; reg < num_reg; reg++) {
155 		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
156 		if (!intr)
157 			continue;
158 		start_vf = 64 * reg;
159 		for (vf = 0; vf < 64; vf++) {
160 			if (!(intr & BIT_ULL(vf)))
161 				continue;
162 			dev = vf + start_vf;
163 			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
164 			/* Clear interrupt */
165 			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
166 			/* Disable the interrupt */
167 			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
168 				     BIT_ULL(vf));
169 		}
170 	}
171 	return IRQ_HANDLED;
172 }
173 
174 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
175 {
176 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
177 	int vf, reg, num_reg = 1;
178 	u64 intr;
179 
180 	if (pf->total_vfs > 64)
181 		num_reg = 2;
182 
183 	for (reg = 0; reg < num_reg; reg++) {
184 		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
185 		if (!intr)
186 			continue;
187 		for (vf = 0; vf < 64; vf++) {
188 			if (!(intr & BIT_ULL(vf)))
189 				continue;
190 			/* clear trpend bit */
191 			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
192 			/* clear interrupt */
193 			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
194 		}
195 	}
196 	return IRQ_HANDLED;
197 }
198 
199 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
200 {
201 	struct otx2_hw *hw = &pf->hw;
202 	char *irq_name;
203 	int ret;
204 
205 	/* Register ME interrupt handler */
206 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
207 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
208 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
209 			  otx2_pf_me_intr_handler, 0, irq_name, pf);
210 	if (ret) {
211 		dev_err(pf->dev,
212 			"RVUPF: IRQ registration failed for ME0\n");
213 	}
214 
215 	/* Register FLR interrupt handler */
216 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
217 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
218 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
219 			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
220 	if (ret) {
221 		dev_err(pf->dev,
222 			"RVUPF: IRQ registration failed for FLR0\n");
223 		return ret;
224 	}
225 
226 	if (numvfs > 64) {
227 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
228 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
229 			 rvu_get_pf(pf->pcifunc));
230 		ret = request_irq(pci_irq_vector
231 				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
232 				  otx2_pf_me_intr_handler, 0, irq_name, pf);
233 		if (ret) {
234 			dev_err(pf->dev,
235 				"RVUPF: IRQ registration failed for ME1\n");
236 		}
237 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
238 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
239 			 rvu_get_pf(pf->pcifunc));
240 		ret = request_irq(pci_irq_vector
241 				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
242 				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
243 		if (ret) {
244 			dev_err(pf->dev,
245 				"RVUPF: IRQ registration failed for FLR1\n");
246 			return ret;
247 		}
248 	}
249 
250 	/* Enable ME interrupt for all VFs */
251 	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
252 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
253 
254 	/* Enable FLR interrupt for all VFs */
255 	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
256 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
257 
258 	if (numvfs > 64) {
259 		numvfs -= 64;
260 
261 		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
262 		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
263 			     INTR_MASK(numvfs));
264 
265 		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
266 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
267 			     INTR_MASK(numvfs));
268 	}
269 	return 0;
270 }
271 
272 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
273 {
274 	int vf;
275 
276 	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
277 	if (!pf->flr_wq)
278 		return -ENOMEM;
279 
280 	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
281 				   sizeof(struct flr_work), GFP_KERNEL);
282 	if (!pf->flr_wrk) {
283 		destroy_workqueue(pf->flr_wq);
284 		return -ENOMEM;
285 	}
286 
287 	for (vf = 0; vf < num_vfs; vf++) {
288 		pf->flr_wrk[vf].pf = pf;
289 		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
290 	}
291 
292 	return 0;
293 }
294 
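/* Scan the VF->PF interrupt bitmap and queue the per-VF mailbox work.
 * Bit 0 of 'intr' corresponds to VF 'first' (e.g. with first = 64, bit 2
 * of 'intr' maps to mdev/work index 66), which is why the loop below
 * tests BIT_ULL(i - first).
 */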
295 static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
296 			       int first, int mdevs, u64 intr)
297 {
298 	struct otx2_mbox_dev *mdev;
299 	struct otx2_mbox *mbox;
300 	struct mbox_hdr *hdr;
301 	int i;
302 
303 	for (i = first; i < mdevs; i++) {
304 		/* Bit positions in 'intr' start at 0 for VF index 'first' */
305 		if (!(intr & BIT_ULL(i - first)))
306 			continue;
307 
308 		mbox = &mw->mbox;
309 		mdev = &mbox->dev[i];
310 		hdr = mdev->mbase + mbox->rx_start;
311 		/* The hdr->num_msgs is set to zero immediately in the interrupt
312 		 * handler to ensure that it holds a correct value the next
313 		 * time the interrupt handler is called. pf->mw[i].num_msgs
314 		 * holds the data for use in otx2_pfvf_mbox_handler and
315 		 * pf->mw[i].up_num_msgs holds the data for use in
316 		 * otx2_pfvf_mbox_up_handler.
317 		 */
318 		if (hdr->num_msgs) {
319 			mw[i].num_msgs = hdr->num_msgs;
320 			hdr->num_msgs = 0;
321 			queue_work(mbox_wq, &mw[i].mbox_wrk);
322 		}
323 
324 		mbox = &mw->mbox_up;
325 		mdev = &mbox->dev[i];
326 		hdr = mdev->mbase + mbox->rx_start;
327 		if (hdr->num_msgs) {
328 			mw[i].up_num_msgs = hdr->num_msgs;
329 			hdr->num_msgs = 0;
330 			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
331 		}
332 	}
333 }
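/* Doorbell helper: the per-VF trigger register is addressed as
 * 'trigger | (devid << tr_shift)' (see below); for illustration, if
 * tr_shift were 12, VF 3's doorbell would sit 3 * 4KB above the base
 * trigger offset. Writing MBOX_DOWN_MSG there notifies the VF that the
 * forwarded responses are ready in its mailbox.
 */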
334 
335 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
336 				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
337 				  int devid)
338 {
339 	struct otx2_mbox_dev *src_mdev = mdev;
340 	int offset;
341 
342 	/* Msgs are already copied, trigger VF's mbox irq */
343 	smp_wmb();
344 
345 	otx2_mbox_wait_for_zero(pfvf_mbox, devid);
346 
347 	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
348 	writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
349 
350 	/* Restore VF's mbox bounce buffer region address */
351 	src_mdev->mbase = bbuf_base;
352 }
353 
354 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
355 				     struct otx2_mbox *src_mbox,
356 				     int dir, int vf, int num_msgs)
357 {
358 	struct otx2_mbox_dev *src_mdev, *dst_mdev;
359 	struct mbox_hdr *mbox_hdr;
360 	struct mbox_hdr *req_hdr;
361 	struct mbox *dst_mbox;
362 	int dst_size, err;
363 
364 	if (dir == MBOX_DIR_PFAF) {
365 		/* Set VF's mailbox memory as PF's bounce buffer memory, so
366 		 * that explicit copying of VF's msgs to PF=>AF mbox region
367 		 * and AF=>PF responses to VF's mbox region can be avoided.
368 		 */
369 		src_mdev = &src_mbox->dev[vf];
370 		mbox_hdr = src_mbox->hwbase +
371 				src_mbox->rx_start + (vf * MBOX_SIZE);
372 
373 		dst_mbox = &pf->mbox;
374 		dst_size = dst_mbox->mbox.tx_size -
375 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
376 		/* Check if msgs fit into destination area and have a valid size */
377 		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
378 			return -EINVAL;
379 
380 		dst_mdev = &dst_mbox->mbox.dev[0];
381 
382 		mutex_lock(&pf->mbox.lock);
383 		dst_mdev->mbase = src_mdev->mbase;
384 		dst_mdev->msg_size = mbox_hdr->msg_size;
385 		dst_mdev->num_msgs = num_msgs;
386 		err = otx2_sync_mbox_msg(dst_mbox);
387 		/* Error code -EIO indicates a communication failure with the
388 		 * AF. The rest of the error codes indicate that the AF processed
389 		 * the VF messages and set the error codes in the response
390 		 * messages (if any), so simply forward the responses to the VF.
391 		 */
392 		if (err == -EIO) {
393 			dev_warn(pf->dev,
394 				 "AF not responding to VF%d messages\n", vf);
395 			/* restore PF mbase and exit */
396 			dst_mdev->mbase = pf->mbox.bbuf_base;
397 			mutex_unlock(&pf->mbox.lock);
398 			return err;
399 		}
400 		/* At this point, all the VF messages sent to the AF have been
401 		 * acked with proper responses and the responses have been copied
402 		 * to the VF mailbox, hence raise an interrupt to the VF.
403 		 */
404 		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
405 					      dst_mbox->mbox.rx_start);
406 		req_hdr->num_msgs = num_msgs;
407 
408 		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
409 				      pf->mbox.bbuf_base, vf);
410 		mutex_unlock(&pf->mbox.lock);
411 	} else if (dir == MBOX_DIR_PFVF_UP) {
412 		src_mdev = &src_mbox->dev[0];
413 		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
414 		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
415 					      src_mbox->rx_start);
416 		req_hdr->num_msgs = num_msgs;
417 
418 		dst_mbox = &pf->mbox_pfvf[0];
419 		dst_size = dst_mbox->mbox_up.tx_size -
420 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
421 		/* Check if msgs fit into destination area */
422 		if (mbox_hdr->msg_size > dst_size)
423 			return -EINVAL;
424 
425 		dst_mdev = &dst_mbox->mbox_up.dev[vf];
426 		dst_mdev->mbase = src_mdev->mbase;
427 		dst_mdev->msg_size = mbox_hdr->msg_size;
428 		dst_mdev->num_msgs = mbox_hdr->num_msgs;
429 		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
430 		if (err) {
431 			dev_warn(pf->dev,
432 				 "VF%d is not responding to mailbox\n", vf);
433 			return err;
434 		}
435 	} else if (dir == MBOX_DIR_VFPF_UP) {
436 		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
437 					      src_mbox->rx_start);
438 		req_hdr->num_msgs = num_msgs;
439 		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
440 				      &pf->mbox.mbox_up,
441 				      pf->mbox_pfvf[vf].bbuf_base,
442 				      0);
443 	}
444 
445 	return 0;
446 }
447 
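/* Bottom half for VF->PF "down" messages: each request from the VF is
 * stamped with the VF's function number (so the AF can attribute it to
 * the right PCI function) and the whole batch is then forwarded to the
 * AF via otx2_forward_vf_mbox_msgs(MBOX_DIR_PFAF).
 */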
448 static void otx2_pfvf_mbox_handler(struct work_struct *work)
449 {
450 	struct mbox_msghdr *msg = NULL;
451 	int offset, vf_idx, id, err;
452 	struct otx2_mbox_dev *mdev;
453 	struct otx2_mbox *mbox;
454 	struct mbox *vf_mbox;
455 	struct otx2_nic *pf;
456 
457 	vf_mbox = container_of(work, struct mbox, mbox_wrk);
458 	pf = vf_mbox->pfvf;
459 	vf_idx = vf_mbox - pf->mbox_pfvf;
460 
461 	mbox = &pf->mbox_pfvf[0].mbox;
462 	mdev = &mbox->dev[vf_idx];
463 
464 	offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
465 
466 	for (id = 0; id < vf_mbox->num_msgs; id++) {
467 		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
468 					     offset);
469 
470 		if (msg->sig != OTX2_MBOX_REQ_SIG)
471 			goto inval_msg;
472 
473 		/* Set the VF's number in each of the msgs */
474 		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
475 		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
476 		offset = msg->next_msgoff;
477 	}
478 	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
479 					vf_mbox->num_msgs);
480 	if (err)
481 		goto inval_msg;
482 	return;
483 
484 inval_msg:
485 	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
486 	otx2_mbox_msg_send(mbox, vf_idx);
487 }
488 
489 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
490 {
491 	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
492 	struct otx2_nic *pf = vf_mbox->pfvf;
493 	struct otx2_mbox_dev *mdev;
494 	int offset, id, vf_idx = 0;
495 	struct mbox_msghdr *msg;
496 	struct otx2_mbox *mbox;
497 
498 	vf_idx = vf_mbox - pf->mbox_pfvf;
499 	mbox = &pf->mbox_pfvf[0].mbox_up;
500 	mdev = &mbox->dev[vf_idx];
501 
502 	offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
503 
504 	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
505 		msg = mdev->mbase + offset;
506 
507 		if (msg->id >= MBOX_MSG_MAX) {
508 			dev_err(pf->dev,
509 				"Mbox msg with unknown ID 0x%x\n", msg->id);
510 			goto end;
511 		}
512 
513 		if (msg->sig != OTX2_MBOX_RSP_SIG) {
514 			dev_err(pf->dev,
515 				"Mbox msg with wrong signature %x, ID 0x%x\n",
516 				msg->sig, msg->id);
517 			goto end;
518 		}
519 
520 		switch (msg->id) {
521 		case MBOX_MSG_CGX_LINK_EVENT:
522 		case MBOX_MSG_REP_EVENT_UP_NOTIFY:
523 			break;
524 		default:
525 			if (msg->rc)
526 				dev_err(pf->dev,
527 					"Mbox msg response has err %d, ID 0x%x\n",
528 					msg->rc, msg->id);
529 			break;
530 		}
531 
532 end:
533 		offset = mbox->rx_start + msg->next_msgoff;
534 		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
535 			__otx2_mbox_reset(mbox, vf_idx);
536 		mdev->msgs_acked++;
537 	}
538 }
539 
540 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
541 {
542 	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
543 	int vfs = pf->total_vfs;
544 	struct mbox *mbox;
545 	u64 intr;
546 
547 	mbox = pf->mbox_pfvf;
548 	/* Handle VF interrupts */
549 	if (vfs > 64) {
550 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
551 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
552 		otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
553 		if (intr)
554 			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
555 		vfs = 64;
556 	}
557 
558 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
559 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
560 
561 	otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
562 
563 	if (intr)
564 		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
565 
566 	return IRQ_HANDLED;
567 }
568 
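/* PF <-> VF mailbox setup. The shared region is one contiguous ioremap of
 * MBOX_SIZE * total_vfs bytes, with VF 'n' using the MBOX_SIZE-sized slot
 * at 'hwbase + n * MBOX_SIZE' (the same stride otx2_forward_vf_mbox_msgs()
 * uses to locate a VF's mbox_hdr).
 */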
569 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
570 {
571 	void __iomem *hwbase;
572 	struct mbox *mbox;
573 	int err, vf;
574 	u64 base;
575 
576 	if (!numvfs)
577 		return -EINVAL;
578 
579 	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
580 				     sizeof(struct mbox), GFP_KERNEL);
581 	if (!pf->mbox_pfvf)
582 		return -ENOMEM;
583 
584 	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
585 					   WQ_UNBOUND | WQ_HIGHPRI |
586 					   WQ_MEM_RECLAIM, 0);
587 	if (!pf->mbox_pfvf_wq)
588 		return -ENOMEM;
589 
590 	/* On CN10K platform, PF <-> VF mailbox region follows after
591 	 * PF <-> AF mailbox region.
592 	 */
593 	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
594 		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
595 		       MBOX_SIZE;
596 	else
597 		base = readq((void __iomem *)((u64)pf->reg_base +
598 					      RVU_PF_VF_BAR4_ADDR));
599 
600 	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
601 	if (!hwbase) {
602 		err = -ENOMEM;
603 		goto free_wq;
604 	}
605 
606 	mbox = &pf->mbox_pfvf[0];
607 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
608 			     MBOX_DIR_PFVF, numvfs);
609 	if (err)
610 		goto free_iomem;
611 
612 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
613 			     MBOX_DIR_PFVF_UP, numvfs);
614 	if (err)
615 		goto free_iomem;
616 
617 	for (vf = 0; vf < numvfs; vf++) {
618 		mbox->pfvf = pf;
619 		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
620 		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
621 		mbox++;
622 	}
623 
624 	return 0;
625 
626 free_iomem:
627 	if (hwbase)
628 		iounmap(hwbase);
629 free_wq:
630 	destroy_workqueue(pf->mbox_pfvf_wq);
631 	return err;
632 }
633 
634 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
635 {
636 	struct mbox *mbox = &pf->mbox_pfvf[0];
637 
638 	if (!mbox)
639 		return;
640 
641 	if (pf->mbox_pfvf_wq) {
642 		destroy_workqueue(pf->mbox_pfvf_wq);
643 		pf->mbox_pfvf_wq = NULL;
644 	}
645 
646 	if (mbox->mbox.hwbase)
647 		iounmap(mbox->mbox.hwbase);
648 
649 	otx2_mbox_destroy(&mbox->mbox);
650 }
651 
652 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
653 {
654 	/* Clear PF <=> VF mailbox IRQ */
655 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
656 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
657 
658 	/* Enable PF <=> VF mailbox IRQ */
659 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
660 	if (numvfs > 64) {
661 		numvfs -= 64;
662 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
663 			     INTR_MASK(numvfs));
664 	}
665 }
666 
667 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
668 {
669 	int vector;
670 
671 	/* Disable PF <=> VF mailbox IRQ */
672 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
673 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
674 
675 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
676 	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
677 	free_irq(vector, pf);
678 
679 	if (numvfs > 64) {
680 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
681 		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
682 		free_irq(vector, pf);
683 	}
684 }
685 
686 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
687 {
688 	struct otx2_hw *hw = &pf->hw;
689 	char *irq_name;
690 	int err;
691 
692 	/* Register MBOX0 interrupt handler */
693 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
694 	if (pf->pcifunc)
695 		snprintf(irq_name, NAME_SIZE,
696 			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
697 	else
698 		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
699 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
700 			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
701 	if (err) {
702 		dev_err(pf->dev,
703 			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
704 		return err;
705 	}
706 
707 	if (numvfs > 64) {
708 		/* Register MBOX1 interrupt handler */
709 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
710 		if (pf->pcifunc)
711 			snprintf(irq_name, NAME_SIZE,
712 				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
713 		else
714 			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
715 		err = request_irq(pci_irq_vector(pf->pdev,
716 						 RVU_PF_INT_VEC_VFPF_MBOX1),
717 						 otx2_pfvf_mbox_intr_handler,
718 						 0, irq_name, pf);
719 		if (err) {
720 			dev_err(pf->dev,
721 				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
722 			return err;
723 		}
724 	}
725 
726 	otx2_enable_pfvf_mbox_intr(pf, numvfs);
727 
728 	return 0;
729 }
730 
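/* Dispatch a single AF->PF response. Responses whose pcifunc has the FUNC
 * bits set were issued on behalf of a VF and only update that VF's
 * link-event/interface state; responses for the PF itself are demuxed by
 * message ID below.
 */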
731 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
732 				       struct mbox_msghdr *msg)
733 {
734 	int devid;
735 
736 	if (msg->id >= MBOX_MSG_MAX) {
737 		dev_err(pf->dev,
738 			"Mbox msg with unknown ID 0x%x\n", msg->id);
739 		return;
740 	}
741 
742 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
743 		dev_err(pf->dev,
744 			"Mbox msg with wrong signature %x, ID 0x%x\n",
745 			 msg->sig, msg->id);
746 		return;
747 	}
748 
749 	/* message response heading to a VF */
750 	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
751 	if (devid) {
752 		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
753 		struct delayed_work *dwork;
754 
755 		switch (msg->id) {
756 		case MBOX_MSG_NIX_LF_START_RX:
757 			config->intf_down = false;
758 			dwork = &config->link_event_work;
759 			schedule_delayed_work(dwork, msecs_to_jiffies(100));
760 			break;
761 		case MBOX_MSG_NIX_LF_STOP_RX:
762 			config->intf_down = true;
763 			break;
764 		}
765 
766 		return;
767 	}
768 
769 	switch (msg->id) {
770 	case MBOX_MSG_READY:
771 		pf->pcifunc = msg->pcifunc;
772 		break;
773 	case MBOX_MSG_MSIX_OFFSET:
774 		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
775 		break;
776 	case MBOX_MSG_NPA_LF_ALLOC:
777 		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
778 		break;
779 	case MBOX_MSG_NIX_LF_ALLOC:
780 		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
781 		break;
782 	case MBOX_MSG_NIX_BP_ENABLE:
783 		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
784 		break;
785 	case MBOX_MSG_CGX_STATS:
786 		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
787 		break;
788 	case MBOX_MSG_CGX_FEC_STATS:
789 		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
790 		break;
791 	default:
792 		if (msg->rc)
793 			dev_err(pf->dev,
794 				"Mbox msg response has err %d, ID 0x%x\n",
795 				msg->rc, msg->id);
796 		break;
797 	}
798 }
799 
800 static void otx2_pfaf_mbox_handler(struct work_struct *work)
801 {
802 	struct otx2_mbox_dev *mdev;
803 	struct mbox_hdr *rsp_hdr;
804 	struct mbox_msghdr *msg;
805 	struct otx2_mbox *mbox;
806 	struct mbox *af_mbox;
807 	struct otx2_nic *pf;
808 	int offset, id;
809 	u16 num_msgs;
810 
811 	af_mbox = container_of(work, struct mbox, mbox_wrk);
812 	mbox = &af_mbox->mbox;
813 	mdev = &mbox->dev[0];
814 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
815 	num_msgs = rsp_hdr->num_msgs;
816 
817 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
818 	pf = af_mbox->pfvf;
819 
820 	for (id = 0; id < num_msgs; id++) {
821 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
822 		otx2_process_pfaf_mbox_msg(pf, msg);
823 		offset = mbox->rx_start + msg->next_msgoff;
824 		if (mdev->msgs_acked == (num_msgs - 1))
825 			__otx2_mbox_reset(mbox, 0);
826 		mdev->msgs_acked++;
827 	}
828 
829 }
830 
831 static void otx2_handle_link_event(struct otx2_nic *pf)
832 {
833 	struct cgx_link_user_info *linfo = &pf->linfo;
834 	struct net_device *netdev = pf->netdev;
835 
836 	if (pf->flags & OTX2_FLAG_PORT_UP)
837 		return;
838 
839 	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
840 		linfo->link_up ? "UP" : "DOWN", linfo->speed,
841 		linfo->full_duplex ? "Full" : "Half");
842 	if (linfo->link_up) {
843 		netif_carrier_on(netdev);
844 		netif_tx_start_all_queues(netdev);
845 	} else {
846 		netif_tx_stop_all_queues(netdev);
847 		netif_carrier_off(netdev);
848 	}
849 }
850 
851 static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf,
852 						    struct rep_event *info,
853 						    struct msg_rsp *rsp)
854 {
855 	struct net_device *netdev = pf->netdev;
856 
857 	if (info->event == RVU_EVENT_MTU_CHANGE) {
858 		netdev->mtu = info->evt_data.mtu;
859 		return 0;
860 	}
861 
862 	if (info->event == RVU_EVENT_PORT_STATE) {
863 		if (info->evt_data.port_state) {
864 			pf->flags |= OTX2_FLAG_PORT_UP;
865 			netif_carrier_on(netdev);
866 			netif_tx_start_all_queues(netdev);
867 		} else {
868 			pf->flags &= ~OTX2_FLAG_PORT_UP;
869 			netif_tx_stop_all_queues(netdev);
870 			netif_carrier_off(netdev);
871 		}
872 		return 0;
873 	}
874 #ifdef CONFIG_RVU_ESWITCH
875 	rvu_event_up_notify(pf, info);
876 #endif
877 	return 0;
878 }
879 
880 int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
881 					 struct mcs_intr_info *event,
882 					 struct msg_rsp *rsp)
883 {
884 	cn10k_handle_mcs_event(pf, event);
885 
886 	return 0;
887 }
888 
889 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
890 					struct cgx_link_info_msg *msg,
891 					struct msg_rsp *rsp)
892 {
893 	int i;
894 
895 	/* Copy the link info sent by AF */
896 	pf->linfo = msg->link_info;
897 
898 	/* notify VFs about link event */
899 	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
900 		struct otx2_vf_config *config = &pf->vf_configs[i];
901 		struct delayed_work *dwork = &config->link_event_work;
902 
903 		if (config->intf_down)
904 			continue;
905 
906 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
907 	}
908 
909 	/* interface has not been fully configured yet */
910 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
911 		return 0;
912 
913 	otx2_handle_link_event(pf);
914 	return 0;
915 }
916 
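/* The M() macro below expands to one 'case' per MBOX_UP_* message: it
 * allocates a response of the message-specific type in the "up" mailbox,
 * fills the common header and calls the matching otx2_mbox_up_handler_*()
 * routine. As a rough sketch, the CGX link event entry expands to
 * something like:
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp = otx2_mbox_alloc_msg(...);
 *		...
 *		return otx2_mbox_up_handler_cgx_link_event(pf,
 *				(struct cgx_link_info_msg *)req, rsp);
 *	}
 */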
917 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
918 				    struct mbox_msghdr *req)
919 {
920 	/* Check if valid, if not reply with an invalid msg */
921 	if (req->sig != OTX2_MBOX_REQ_SIG) {
922 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
923 		return -ENODEV;
924 	}
925 
926 	switch (req->id) {
927 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
928 	case _id: {							\
929 		struct _rsp_type *rsp;					\
930 		int err;						\
931 									\
932 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
933 			&pf->mbox.mbox_up, 0,				\
934 			sizeof(struct _rsp_type));			\
935 		if (!rsp)						\
936 			return -ENOMEM;					\
937 									\
938 		rsp->hdr.id = _id;					\
939 		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
940 		rsp->hdr.pcifunc = 0;					\
941 		rsp->hdr.rc = 0;					\
942 									\
943 		err = otx2_mbox_up_handler_ ## _fn_name(		\
944 			pf, (struct _req_type *)req, rsp);		\
945 		return err;						\
946 	}
947 MBOX_UP_CGX_MESSAGES
948 MBOX_UP_MCS_MESSAGES
949 MBOX_UP_REP_MESSAGES
950 #undef M
951 		break;
952 	default:
953 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
954 		return -ENODEV;
955 	}
956 	return 0;
957 }
958 
959 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
960 {
961 	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
962 	struct otx2_mbox *mbox = &af_mbox->mbox_up;
963 	struct otx2_mbox_dev *mdev = &mbox->dev[0];
964 	struct otx2_nic *pf = af_mbox->pfvf;
965 	int offset, id, devid = 0;
966 	struct mbox_hdr *rsp_hdr;
967 	struct mbox_msghdr *msg;
968 	u16 num_msgs;
969 
970 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
971 	num_msgs = rsp_hdr->num_msgs;
972 
973 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
974 
975 	for (id = 0; id < num_msgs; id++) {
976 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
977 
978 		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
979 		/* Skip processing VF's messages */
980 		if (!devid)
981 			otx2_process_mbox_msg_up(pf, msg);
982 		offset = mbox->rx_start + msg->next_msgoff;
983 	}
984 	/* Forward to VF iff VFs are really present */
985 	if (devid && pci_num_vf(pf->pdev)) {
986 		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
987 					  MBOX_DIR_PFVF_UP, devid - 1,
988 					  num_msgs);
989 		return;
990 	}
991 
992 	otx2_mbox_msg_send(mbox, 0);
993 }
994 
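/* AF -> PF mailbox interrupt: RVU_PF_PFAF_MBOX0 carries per-direction
 * notification flags. MBOX_UP_MSG signals an AF-initiated "up" request and
 * MBOX_DOWN_MSG signals a reply to a PF-initiated "down" request; each flag
 * is cleared and the corresponding work item is queued only if the mailbox
 * header actually contains messages.
 */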
995 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
996 {
997 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
998 	struct mbox *mw = &pf->mbox;
999 	struct otx2_mbox_dev *mdev;
1000 	struct otx2_mbox *mbox;
1001 	struct mbox_hdr *hdr;
1002 	u64 mbox_data;
1003 
1004 	/* Clear the IRQ */
1005 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
1006 
1007 
1008 	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
1009 
1010 	if (mbox_data & MBOX_UP_MSG) {
1011 		mbox_data &= ~MBOX_UP_MSG;
1012 		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
1013 
1014 		mbox = &mw->mbox_up;
1015 		mdev = &mbox->dev[0];
1016 		otx2_sync_mbox_bbuf(mbox, 0);
1017 
1018 		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
1019 		if (hdr->num_msgs)
1020 			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
1021 
1022 		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
1023 					 BIT_ULL(0));
1024 	}
1025 
1026 	if (mbox_data & MBOX_DOWN_MSG) {
1027 		mbox_data &= ~MBOX_DOWN_MSG;
1028 		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
1029 
1030 		mbox = &mw->mbox;
1031 		mdev = &mbox->dev[0];
1032 		otx2_sync_mbox_bbuf(mbox, 0);
1033 
1034 		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
1035 		if (hdr->num_msgs)
1036 			queue_work(pf->mbox_wq, &mw->mbox_wrk);
1037 
1038 		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
1039 					 BIT_ULL(0));
1040 	}
1041 
1042 	return IRQ_HANDLED;
1043 }
1044 
1045 void otx2_disable_mbox_intr(struct otx2_nic *pf)
1046 {
1047 	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
1048 
1049 	/* Disable AF => PF mailbox IRQ */
1050 	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
1051 	free_irq(vector, pf);
1052 }
1053 EXPORT_SYMBOL(otx2_disable_mbox_intr);
1054 
1055 int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
1056 {
1057 	struct otx2_hw *hw = &pf->hw;
1058 	struct msg_req *req;
1059 	char *irq_name;
1060 	int err;
1061 
1062 	/* Register mailbox interrupt handler */
1063 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
1064 	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
1065 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
1066 			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
1067 	if (err) {
1068 		dev_err(pf->dev,
1069 			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
1070 		return err;
1071 	}
1072 
1073 	/* Enable mailbox interrupt for msgs coming from AF.
1074 	 * First clear to avoid spurious interrupts, if any.
1075 	 */
1076 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
1077 	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
1078 
1079 	if (!probe_af)
1080 		return 0;
1081 
1082 	/* Check mailbox communication with AF */
1083 	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1084 	if (!req) {
1085 		otx2_disable_mbox_intr(pf);
1086 		return -ENOMEM;
1087 	}
1088 	err = otx2_sync_mbox_msg(&pf->mbox);
1089 	if (err) {
1090 		dev_warn(pf->dev,
1091 			 "AF not responding to mailbox, deferring probe\n");
1092 		otx2_disable_mbox_intr(pf);
1093 		return -EPROBE_DEFER;
1094 	}
1095 
1096 	return 0;
1097 }
1098 
1099 void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1100 {
1101 	struct mbox *mbox = &pf->mbox;
1102 
1103 	if (pf->mbox_wq) {
1104 		destroy_workqueue(pf->mbox_wq);
1105 		pf->mbox_wq = NULL;
1106 	}
1107 
1108 	if (mbox->mbox.hwbase)
1109 		iounmap((void __iomem *)mbox->mbox.hwbase);
1110 
1111 	otx2_mbox_destroy(&mbox->mbox);
1112 	otx2_mbox_destroy(&mbox->mbox_up);
1113 }
1114 EXPORT_SYMBOL(otx2_pfaf_mbox_destroy);
1115 
1116 int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1117 {
1118 	struct mbox *mbox = &pf->mbox;
1119 	void __iomem *hwbase;
1120 	int err;
1121 
1122 	mbox->pfvf = pf;
1123 	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
1124 					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
1125 	if (!pf->mbox_wq)
1126 		return -ENOMEM;
1127 
1128 	/* Mailbox is a reserved memory (in RAM) region shared between the
1129 	 * admin function (i.e. AF) and this PF; it shouldn't be mapped as
1130 	 * device memory, so that unaligned accesses are allowed.
1131 	 */
1132 	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1133 			    MBOX_SIZE);
1134 	if (!hwbase) {
1135 		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1136 		err = -ENOMEM;
1137 		goto exit;
1138 	}
1139 
1140 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1141 			     MBOX_DIR_PFAF, 1);
1142 	if (err)
1143 		goto exit;
1144 
1145 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1146 			     MBOX_DIR_PFAF_UP, 1);
1147 	if (err)
1148 		goto exit;
1149 
1150 	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1151 	if (err)
1152 		goto exit;
1153 
1154 	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1155 	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1156 	mutex_init(&mbox->lock);
1157 
1158 	return 0;
1159 exit:
1160 	otx2_pfaf_mbox_destroy(pf);
1161 	return err;
1162 }
1163 
1164 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1165 {
1166 	struct msg_req *msg;
1167 	int err;
1168 
1169 	mutex_lock(&pf->mbox.lock);
1170 	if (enable)
1171 		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1172 	else
1173 		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1174 
1175 	if (!msg) {
1176 		mutex_unlock(&pf->mbox.lock);
1177 		return -ENOMEM;
1178 	}
1179 
1180 	err = otx2_sync_mbox_msg(&pf->mbox);
1181 	mutex_unlock(&pf->mbox.lock);
1182 	return err;
1183 }
1184 
1185 int otx2_reset_mac_stats(struct otx2_nic *pfvf)
1186 {
1187 	struct msg_req *req;
1188 	int err;
1189 
1190 	mutex_lock(&pfvf->mbox.lock);
1191 	req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox);
1192 	if (!req) {
1193 		mutex_unlock(&pfvf->mbox.lock);
1194 		return -ENOMEM;
1195 	}
1196 
1197 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1198 	mutex_unlock(&pfvf->mbox.lock);
1199 	return err;
1200 }
1201 
1202 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1203 {
1204 	struct msg_req *msg;
1205 	int err;
1206 
1207 	if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
1208 				    pf->flow_cfg->dmacflt_max_flows))
1209 		netdev_warn(pf->netdev,
1210 			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");
1211 
1212 	mutex_lock(&pf->mbox.lock);
1213 	if (enable)
1214 		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1215 	else
1216 		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1217 
1218 	if (!msg) {
1219 		mutex_unlock(&pf->mbox.lock);
1220 		return -ENOMEM;
1221 	}
1222 
1223 	err = otx2_sync_mbox_msg(&pf->mbox);
1224 	mutex_unlock(&pf->mbox.lock);
1225 	return err;
1226 }
1227 
1228 int otx2_set_real_num_queues(struct net_device *netdev,
1229 			     int tx_queues, int rx_queues)
1230 {
1231 	int err;
1232 
1233 	err = netif_set_real_num_tx_queues(netdev, tx_queues);
1234 	if (err) {
1235 		netdev_err(netdev,
1236 			   "Failed to set no of Tx queues: %d\n", tx_queues);
1237 		return err;
1238 	}
1239 
1240 	err = netif_set_real_num_rx_queues(netdev, rx_queues);
1241 	if (err)
1242 		netdev_err(netdev,
1243 			   "Failed to set no of Rx queues: %d\n", rx_queues);
1244 	return err;
1245 }
1246 EXPORT_SYMBOL(otx2_set_real_num_queues);
1247 
1248 static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
1249 	"NIX_SQOPERR_OOR",
1250 	"NIX_SQOPERR_CTX_FAULT",
1251 	"NIX_SQOPERR_CTX_POISON",
1252 	"NIX_SQOPERR_DISABLED",
1253 	"NIX_SQOPERR_SIZE_ERR",
1254 	"NIX_SQOPERR_OFLOW",
1255 	"NIX_SQOPERR_SQB_NULL",
1256 	"NIX_SQOPERR_SQB_FAULT",
1257 	"NIX_SQOPERR_SQE_SZ_ZERO",
1258 };
1259 
1260 static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
1261 	"NIX_MNQERR_SQ_CTX_FAULT",
1262 	"NIX_MNQERR_SQ_CTX_POISON",
1263 	"NIX_MNQERR_SQB_FAULT",
1264 	"NIX_MNQERR_SQB_POISON",
1265 	"NIX_MNQERR_TOTAL_ERR",
1266 	"NIX_MNQERR_LSO_ERR",
1267 	"NIX_MNQERR_CQ_QUERY_ERR",
1268 	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
1269 	"NIX_MNQERR_MAXLEN_ERR",
1270 	"NIX_MNQERR_SQE_SIZEM1_ZERO",
1271 };
1272 
1273 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
1274 	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
1275 	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
1276 	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
1277 	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
1278 	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
1279 	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
1280 	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
1281 	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
1282 	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
1283 	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
1284 	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
1285 	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
1286 	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
1287 	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
1288 	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
1289 	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
1290 	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
1291 	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
1292 	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
1293 	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
1294 	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
1295 	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
1296 	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
1297 	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
1298 	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
1299 	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
1300 };
1301 
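/* Queue error (QINT) interrupt handler. Note on the register accesses
 * below: NIX_LF_CQ_OP_INT/NIX_LF_SQ_OP_INT are read via an atomic add with
 * the queue index placed in the upper operand bits (qidx << 44), which
 * returns that queue's interrupt status; BIT_ULL(42) in the returned value
 * means the status read itself failed, in which case NIX_LF_ERR_INT is
 * dumped instead.
 */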
1302 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1303 {
1304 	struct otx2_nic *pf = data;
1305 	struct otx2_snd_queue *sq;
1306 	u64 val, *ptr;
1307 	u64 qidx = 0;
1308 
1309 	/* CQ */
1310 	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1311 		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1312 		val = otx2_atomic64_add((qidx << 44), ptr);
1313 
1314 		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1315 			     (val & NIX_CQERRINT_BITS));
1316 		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1317 			continue;
1318 
1319 		if (val & BIT_ULL(42)) {
1320 			netdev_err(pf->netdev,
1321 				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1322 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1323 		} else {
1324 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1325 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1326 					   qidx);
1327 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1328 				netdev_err(pf->netdev,
1329 					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1330 					   qidx);
1331 		}
1332 
1333 		schedule_work(&pf->reset_task);
1334 	}
1335 
1336 	/* SQ */
1337 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
1338 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
1339 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
1340 
1341 		sq = &pf->qset.sq[qidx];
1342 		if (!sq->sqb_ptrs)
1343 			continue;
1344 
1345 		/* The debug registers below capture the first error reported
1346 		 * via each register. We don't have to check against the SQ qid
1347 		 * as these are fatal errors.
1348 		 */
1349 
1350 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1351 		val = otx2_atomic64_add((qidx << 44), ptr);
1352 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1353 			     (val & NIX_SQINT_BITS));
1354 
1355 		if (val & BIT_ULL(42)) {
1356 			netdev_err(pf->netdev,
1357 				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1358 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1359 			goto done;
1360 		}
1361 
1362 		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
1363 		if (!(sq_op_err_dbg & BIT(44)))
1364 			goto chk_mnq_err_dbg;
1365 
1366 		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
1367 		netdev_err(pf->netdev,
1368 			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1369 			   qidx, sq_op_err_dbg,
1370 			   nix_sqoperr_e_str[sq_op_err_code],
1371 			   sq_op_err_code);
1372 
1373 		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
1374 
1375 		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
1376 			goto chk_mnq_err_dbg;
1377 
1378 		/* Err is not NIX_SQOPERR_SQB_NULL, call the AQ function to read the SQ structure.
1379 		 * TODO: but we are in irq context; how to call mbox functions, which can sleep?
1380 		 */
1381 
1382 chk_mnq_err_dbg:
1383 		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
1384 		if (!(mnq_err_dbg & BIT(44)))
1385 			goto chk_snd_err_dbg;
1386 
1387 		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
1388 		netdev_err(pf->netdev,
1389 			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1390 			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code],
1391 			   mnq_err_code);
1392 		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
1393 
1394 chk_snd_err_dbg:
1395 		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
1396 		if (snd_err_dbg & BIT(44)) {
1397 			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
1398 			netdev_err(pf->netdev,
1399 				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
1400 				   qidx, snd_err_dbg,
1401 				   nix_snd_status_e_str[snd_err_code],
1402 				   snd_err_code);
1403 			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
1404 		}
1405 
1406 done:
1407 		/* Print values and reset */
1408 		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1409 			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1410 				   qidx);
1411 
1412 		schedule_work(&pf->reset_task);
1413 	}
1414 
1415 	return IRQ_HANDLED;
1416 }
1417 
1418 irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1419 {
1420 	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1421 	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1422 	int qidx = cq_poll->cint_idx;
1423 
1424 	/* Disable interrupts.
1425 	 *
1426 	 * Completion interrupts behave in a level-triggered interrupt
1427 	 * fashion, and hence have to be cleared only after they are serviced.
1428 	 */
1429 	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1430 
1431 	/* Schedule NAPI */
1432 	pf->napi_events++;
1433 	napi_schedule_irqoff(&cq_poll->napi);
1434 
1435 	return IRQ_HANDLED;
1436 }
1437 EXPORT_SYMBOL(otx2_cq_intr_handler);
1438 
1439 void otx2_disable_napi(struct otx2_nic *pf)
1440 {
1441 	struct otx2_qset *qset = &pf->qset;
1442 	struct otx2_cq_poll *cq_poll;
1443 	struct work_struct *work;
1444 	int qidx;
1445 
1446 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1447 		cq_poll = &qset->napi[qidx];
1448 		work = &cq_poll->dim.work;
1449 		if (work->func)
1450 			cancel_work_sync(work);
1451 		napi_disable(&cq_poll->napi);
1452 		netif_napi_del(&cq_poll->napi);
1453 	}
1454 }
1455 EXPORT_SYMBOL(otx2_disable_napi);
1456 
1457 static void otx2_free_cq_res(struct otx2_nic *pf)
1458 {
1459 	struct otx2_qset *qset = &pf->qset;
1460 	struct otx2_cq_queue *cq;
1461 	int qidx;
1462 
1463 	/* Disable CQs */
1464 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1465 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1466 		cq = &qset->cq[qidx];
1467 		qmem_free(pf->dev, cq->cqe);
1468 	}
1469 }
1470 
1471 static void otx2_free_sq_res(struct otx2_nic *pf)
1472 {
1473 	struct otx2_qset *qset = &pf->qset;
1474 	struct otx2_snd_queue *sq;
1475 	int qidx;
1476 
1477 	/* Disable SQs */
1478 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1479 	/* Free SQB pointers */
1480 	otx2_sq_free_sqbs(pf);
1481 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
1482 		sq = &qset->sq[qidx];
1483 		/* Skip freeing Qos queues if they are not initialized */
1484 		if (!sq->sqe)
1485 			continue;
1486 		qmem_free(pf->dev, sq->sqe);
1487 		qmem_free(pf->dev, sq->tso_hdrs);
1488 		kfree(sq->sg);
1489 		kfree(sq->sqb_ptrs);
1490 	}
1491 }
1492 
1493 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1494 {
1495 	int frame_size;
1496 	int total_size;
1497 	int rbuf_size;
1498 
1499 	if (pf->hw.rbuf_len)
1500 		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
1501 
1502 	/* The data transferred by NIX to memory consists of actual packet
1503 	 * plus additional data which has timestamp and/or EDSA/HIGIG2
1504 	 * headers if interface is configured in corresponding modes.
1505 	 * NIX transfers entire data using 6 segments/buffers and writes
1506 	 * a CQE_RX descriptor with those segment addresses. First segment
1507 	 * has additional data prepended to packet. Also software reserves a
1508 	 * headroom of 128 bytes in each segment. Hence the total size of
1509 	 * memory needed to receive a packet with 'mtu' is:
1510 	 * frame size =  mtu + additional data;
1511 	 * memory = frame_size + headroom * 6;
1512 	 * each receive buffer size = memory / 6;
1513 	 */
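	/* Worked example (illustrative values): with an MTU of 1500 and
	 * assuming OTX2_ETH_HLEN = 22, OTX2_HW_TIMESTAMP_LEN = 8 and
	 * OTX2_HEAD_ROOM = 128: frame_size = 1530, total_size = 1530 +
	 * 128 * 6 = 2298, rbuf_size = 383, which ALIGN() rounds up to 2048.
	 */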
1514 	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1515 	total_size = frame_size + OTX2_HEAD_ROOM * 6;
1516 	rbuf_size = total_size / 6;
1517 
1518 	return ALIGN(rbuf_size, 2048);
1519 }
1520 
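/* Bring-up order for the datapath: NPA LF (pools/auras), NIX LF,
 * backpressure, RQ and SQ aura/pool init, TX scheduler queues and finally
 * the NIX RQ/SQ/CQ contexts. The error labels at the bottom of the function
 * unwind in exactly the reverse order.
 */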
1521 int otx2_init_hw_resources(struct otx2_nic *pf)
1522 {
1523 	struct nix_lf_free_req *free_req;
1524 	struct mbox *mbox = &pf->mbox;
1525 	struct otx2_hw *hw = &pf->hw;
1526 	struct msg_req *req;
1527 	int err = 0, lvl;
1528 
1529 	/* Set required NPA LF's pool counts
1530 	 * Auras and Pools are used in a 1:1 mapping,
1531 	 * so, aura count = pool count.
1532 	 */
1533 	hw->rqpool_cnt = hw->rx_queues;
1534 	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
1535 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1536 
1537 	if (!otx2_rep_dev(pf->pdev)) {
1538 		/* Maximum hardware supported transmit length */
1539 		pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
1540 		pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1541 	}
1542 
1543 	mutex_lock(&mbox->lock);
1544 	/* NPA init */
1545 	err = otx2_config_npa(pf);
1546 	if (err)
1547 		goto exit;
1548 
1549 	/* NIX init */
1550 	err = otx2_config_nix(pf);
1551 	if (err)
1552 		goto err_free_npa_lf;
1553 
1554 	/* Enable backpressure for CGX mapped PF/VFs */
1555 	if (!is_otx2_lbkvf(pf->pdev))
1556 		otx2_nix_config_bp(pf, true);
1557 
1558 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1559 	err = otx2_rq_aura_pool_init(pf);
1560 	if (err) {
1561 		mutex_unlock(&mbox->lock);
1562 		goto err_free_nix_lf;
1563 	}
1564 	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
1565 	err = otx2_sq_aura_pool_init(pf);
1566 	if (err) {
1567 		mutex_unlock(&mbox->lock);
1568 		goto err_free_rq_ptrs;
1569 	}
1570 
1571 	err = otx2_txsch_alloc(pf);
1572 	if (err) {
1573 		mutex_unlock(&mbox->lock);
1574 		goto err_free_sq_ptrs;
1575 	}
1576 
1577 #ifdef CONFIG_DCB
1578 	if (pf->pfc_en) {
1579 		err = otx2_pfc_txschq_alloc(pf);
1580 		if (err) {
1581 			mutex_unlock(&mbox->lock);
1582 			goto err_free_sq_ptrs;
1583 		}
1584 	}
1585 #endif
1586 
1587 	err = otx2_config_nix_queues(pf);
1588 	if (err) {
1589 		mutex_unlock(&mbox->lock);
1590 		goto err_free_txsch;
1591 	}
1592 
1593 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1594 		int idx;
1595 
1596 		for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) {
1597 			err = otx2_txschq_config(pf, lvl, idx, false);
1598 			if (err) {
1599 				dev_err(pf->dev, "Failed to config TXSCH\n");
1600 				mutex_unlock(&mbox->lock);
1601 				goto err_free_nix_queues;
1602 			}
1603 		}
1604 	}
1605 
1606 #ifdef CONFIG_DCB
1607 	if (pf->pfc_en) {
1608 		err = otx2_pfc_txschq_config(pf);
1609 		if (err) {
1610 			mutex_unlock(&mbox->lock);
1611 			goto err_free_nix_queues;
1612 		}
1613 	}
1614 #endif
1615 
1616 	mutex_unlock(&mbox->lock);
1617 	return err;
1618 
1619 err_free_nix_queues:
1620 	otx2_free_sq_res(pf);
1621 	otx2_free_cq_res(pf);
1622 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1623 err_free_txsch:
1624 	otx2_txschq_stop(pf);
1625 err_free_sq_ptrs:
1626 	otx2_sq_free_sqbs(pf);
1627 err_free_rq_ptrs:
1628 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1629 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1630 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1631 	otx2_aura_pool_free(pf);
1632 err_free_nix_lf:
1633 	mutex_lock(&mbox->lock);
1634 	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1635 	if (free_req) {
1636 		free_req->flags = NIX_LF_DISABLE_FLOWS;
1637 		if (otx2_sync_mbox_msg(mbox))
1638 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1639 	}
1640 err_free_npa_lf:
1641 	/* Reset NPA LF */
1642 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1643 	if (req) {
1644 		if (otx2_sync_mbox_msg(mbox))
1645 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1646 	}
1647 exit:
1648 	mutex_unlock(&mbox->lock);
1649 	return err;
1650 }
1651 EXPORT_SYMBOL(otx2_init_hw_resources);
1652 
1653 void otx2_free_hw_resources(struct otx2_nic *pf)
1654 {
1655 	struct otx2_qset *qset = &pf->qset;
1656 	struct nix_lf_free_req *free_req;
1657 	struct mbox *mbox = &pf->mbox;
1658 	struct otx2_cq_queue *cq;
1659 	struct otx2_pool *pool;
1660 	struct msg_req *req;
1661 	int pool_id;
1662 	int qidx;
1663 
1664 	/* Ensure all SQE are processed */
1665 	otx2_sqb_flush(pf);
1666 
1667 	/* Stop transmission */
1668 	otx2_txschq_stop(pf);
1669 
1670 #ifdef CONFIG_DCB
1671 	if (pf->pfc_en)
1672 		otx2_pfc_txschq_stop(pf);
1673 #endif
1674 
1675 	if (!otx2_rep_dev(pf->pdev))
1676 		otx2_clean_qos_queues(pf);
1677 
1678 	mutex_lock(&mbox->lock);
1679 	/* Disable backpressure */
1680 	if (!is_otx2_lbkvf(pf->pdev))
1681 		otx2_nix_config_bp(pf, false);
1682 	mutex_unlock(&mbox->lock);
1683 
1684 	/* Disable RQs */
1685 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1686 
1687 	/* Dequeue all CQEs */
1688 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1689 		cq = &qset->cq[qidx];
1690 		if (cq->cq_type == CQ_RX)
1691 			otx2_cleanup_rx_cqes(pf, cq, qidx);
1692 		else
1693 			otx2_cleanup_tx_cqes(pf, cq);
1694 	}
1695 	otx2_free_pending_sqe(pf);
1696 
1697 	otx2_free_sq_res(pf);
1698 
1699 	/* Free RQ buffer pointers */
1700 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1701 
1702 	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
1703 		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
1704 		pool = &pf->qset.pool[pool_id];
1705 		page_pool_destroy(pool->page_pool);
1706 		pool->page_pool = NULL;
1707 	}
1708 
1709 	otx2_free_cq_res(pf);
1710 
1711 	/* Free all ingress bandwidth profiles allocated */
1712 	if (!otx2_rep_dev(pf->pdev))
1713 		cn10k_free_all_ipolicers(pf);
1714 
1715 	mutex_lock(&mbox->lock);
1716 	/* Reset NIX LF */
1717 	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1718 	if (free_req) {
1719 		free_req->flags = NIX_LF_DISABLE_FLOWS;
1720 		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1721 			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1722 		if (otx2_sync_mbox_msg(mbox))
1723 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1724 	}
1725 	mutex_unlock(&mbox->lock);
1726 
1727 	/* Disable NPA Pool and Aura hw context */
1728 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1729 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1730 	otx2_aura_pool_free(pf);
1731 
1732 	mutex_lock(&mbox->lock);
1733 	/* Reset NPA LF */
1734 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1735 	if (req) {
1736 		if (otx2_sync_mbox_msg(mbox))
1737 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1738 	}
1739 	mutex_unlock(&mbox->lock);
1740 }
1741 EXPORT_SYMBOL(otx2_free_hw_resources);
1742 
1743 static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
1744 {
1745 	int vf;
1746 
1747 	/* The AF driver will determine whether to allow the VF netdev or not */
1748 	if (is_otx2_vf(pfvf->pcifunc))
1749 		return true;
1750 
1751 	/* check if there are any trusted VFs associated with the PF netdev */
1752 	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
1753 		if (pfvf->vf_configs[vf].trusted)
1754 			return true;
1755 	return false;
1756 }
1757 
1758 static void otx2_do_set_rx_mode(struct otx2_nic *pf)
1759 {
1760 	struct net_device *netdev = pf->netdev;
1761 	struct nix_rx_mode *req;
1762 	bool promisc = false;
1763 
1764 	if (!(netdev->flags & IFF_UP))
1765 		return;
1766 
1767 	if ((netdev->flags & IFF_PROMISC) ||
1768 	    (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) {
1769 		promisc = true;
1770 	}
1771 
1772 	/* Write unicast address to mcam entries or del from mcam */
1773 	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1774 		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1775 
1776 	mutex_lock(&pf->mbox.lock);
1777 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1778 	if (!req) {
1779 		mutex_unlock(&pf->mbox.lock);
1780 		return;
1781 	}
1782 
1783 	req->mode = NIX_RX_MODE_UCAST;
1784 
1785 	if (promisc)
1786 		req->mode |= NIX_RX_MODE_PROMISC;
1787 	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1788 		req->mode |= NIX_RX_MODE_ALLMULTI;
1789 
1790 	if (otx2_promisc_use_mce_list(pf))
1791 		req->mode |= NIX_RX_MODE_USE_MCE;
1792 
1793 	otx2_sync_mbox_msg(&pf->mbox);
1794 	mutex_unlock(&pf->mbox.lock);
1795 }
1796 
1797 static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
1798 {
1799 	int cint;
1800 
1801 	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
1802 		otx2_config_irq_coalescing(pfvf, cint);
1803 }
1804 
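/* DIM (Dynamic Interrupt Moderation) worker: net_dim has picked a new RX
 * moderation profile, so clamp its suggested usecs/packets to the hardware
 * limits (CQ_TIMER_THRESH_MAX and NAPI_POLL_WEIGHT) and reprogram interrupt
 * coalescing on every completion interrupt (CINT).
 */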
1805 static void otx2_dim_work(struct work_struct *w)
1806 {
1807 	struct dim_cq_moder cur_moder;
1808 	struct otx2_cq_poll *cq_poll;
1809 	struct otx2_nic *pfvf;
1810 	struct dim *dim;
1811 
1812 	dim = container_of(w, struct dim, work);
1813 	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1814 	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
1815 	pfvf = (struct otx2_nic *)cq_poll->dev;
1816 	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
1817 		CQ_TIMER_THRESH_MAX : cur_moder.usec;
1818 	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
1819 		NAPI_POLL_WEIGHT : cur_moder.pkts;
1820 	otx2_set_irq_coalesce(pfvf);
1821 	dim->state = DIM_START_MEASURE;
1822 }
1823 
1824 void otx2_free_queue_mem(struct otx2_qset *qset)
1825 {
1826 	kfree(qset->sq);
1827 	qset->sq = NULL;
1828 	kfree(qset->cq);
1829 	qset->cq = NULL;
1830 	kfree(qset->rq);
1831 	qset->rq = NULL;
1832 	kfree(qset->napi);
1833 	qset->napi = NULL;
1834 }
1835 EXPORT_SYMBOL(otx2_free_queue_mem);
1836 
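/* Queue bookkeeping example: a CINT is shared by the RX, TX, XDP and QoS
 * queues with the same index, so cint_cnt is the max of those counts while
 * cq_cnt covers every CQ. For instance (illustrative numbers), with 4 RX,
 * 4 TX, 0 XDP and 2 QoS TX queues: cint_cnt = 4 and, assuming
 * otx2_get_total_tx_queues() returns tx + xdp + QoS queues, cq_cnt =
 * 4 + (4 + 0 + 2) = 10.
 */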
1837 int otx2_alloc_queue_mem(struct otx2_nic *pf)
1838 {
1839 	struct otx2_qset *qset = &pf->qset;
1840 	struct otx2_cq_poll *cq_poll;
1841 
1842 
1843 	/* RQ and SQs are mapped to different CQs,
1844 	 * so find out max CQ IRQs (i.e. CINTs) needed.
1845 	 */
1846 	pf->hw.non_qos_queues =  pf->hw.tx_queues + pf->hw.xdp_queues;
1847 	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
1848 			       pf->hw.tc_tx_queues);
1849 
1850 	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
1851 
1852 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1853 	if (!qset->napi)
1854 		return -ENOMEM;
1855 
1856 	/* CQ size of RQ */
1857 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1858 	/* CQ size of SQ */
1859 	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1860 
1861 	qset->cq = kcalloc(pf->qset.cq_cnt,
1862 			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
1863 	if (!qset->cq)
1864 		goto err_free_mem;
1865 
1866 	qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
1867 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
1868 	if (!qset->sq)
1869 		goto err_free_mem;
1870 
1871 	qset->rq = kcalloc(pf->hw.rx_queues,
1872 			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1873 	if (!qset->rq)
1874 		goto err_free_mem;
1875 
1876 	return 0;
1877 
1878 err_free_mem:
1879 	otx2_free_queue_mem(qset);
1880 	return -ENOMEM;
1881 }
1882 EXPORT_SYMBOL(otx2_alloc_queue_mem);
1883 
1884 int otx2_open(struct net_device *netdev)
1885 {
1886 	struct otx2_nic *pf = netdev_priv(netdev);
1887 	struct otx2_cq_poll *cq_poll = NULL;
1888 	struct otx2_qset *qset = &pf->qset;
1889 	int err = 0, qidx, vec;
1890 	char *irq_name;
1891 
1892 	netif_carrier_off(netdev);
1893 
1894 	err = otx2_alloc_queue_mem(pf);
1895 	if (err)
1896 		return err;
1897 
1898 	err = otx2_init_hw_resources(pf);
1899 	if (err)
1900 		goto err_free_mem;
1901 
1902 	/* Register NAPI handler */
1903 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1904 		cq_poll = &qset->napi[qidx];
1905 		cq_poll->cint_idx = qidx;
1906 		/* RQ0 & SQ0 are mapped to CINT0 and so on..
1907 		 * 'cq_ids[0]' points to RQ's CQ,
1908 		 * 'cq_ids[1]' points to SQ's CQ and
1909 		 * 'cq_ids[2]' points to XDP's CQ.
1910 		 */
1911 		cq_poll->cq_ids[CQ_RX] =
1912 			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1913 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1914 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1915 		if (pf->xdp_prog)
1916 			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
1917 						  (qidx + pf->hw.rx_queues +
1918 						  pf->hw.tx_queues) :
1919 						  CINT_INVALID_CQ;
1920 		else
1921 			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
1922 
1923 		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
1924 					  (qidx + pf->hw.rx_queues +
1925 					   pf->hw.non_qos_queues) :
1926 					  CINT_INVALID_CQ;
1927 
1928 		cq_poll->dev = (void *)pf;
1929 		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1930 		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
1931 		netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
1932 		napi_enable(&cq_poll->napi);
1933 	}
1934 
1935 	/* Set maximum frame size allowed in HW */
1936 	err = otx2_hw_set_mtu(pf, netdev->mtu);
1937 	if (err)
1938 		goto err_disable_napi;
1939 
1940 	/* Set up segmentation algorithms; if this fails, offload capability is cleared */
1941 	otx2_setup_segmentation(pf);
1942 
1943 	/* Initialize RSS */
1944 	err = otx2_rss_init(pf);
1945 	if (err)
1946 		goto err_disable_napi;
1947 
1948 	/* Register Queue IRQ handlers */
1949 	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1950 	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1951 
1952 	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1953 
1954 	err = request_irq(pci_irq_vector(pf->pdev, vec),
1955 			  otx2_q_intr_handler, 0, irq_name, pf);
1956 	if (err) {
1957 		dev_err(pf->dev,
1958 			"RVUPF%d: IRQ registration failed for QERR\n",
1959 			rvu_get_pf(pf->pcifunc));
1960 		goto err_disable_napi;
1961 	}
1962 
1963 	/* Enable QINT IRQ */
1964 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1965 
1966 	/* Register CQ IRQ handlers */
1967 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1968 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1969 		int name_len;
1970 
1971 		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1972 		name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
1973 				    pf->netdev->name, qidx);
1974 		if (name_len >= NAME_SIZE) {
1975 			dev_err(pf->dev,
1976 				"RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
1977 				rvu_get_pf(pf->pcifunc), qidx);
1978 			err = -EINVAL;
1979 			goto err_free_cints;
1980 		}
1981 
1982 		err = request_irq(pci_irq_vector(pf->pdev, vec),
1983 				  otx2_cq_intr_handler, 0, irq_name,
1984 				  &qset->napi[qidx]);
1985 		if (err) {
1986 			dev_err(pf->dev,
1987 				"RVUPF%d: IRQ registration failed for CQ%d\n",
1988 				rvu_get_pf(pf->pcifunc), qidx);
1989 			goto err_free_cints;
1990 		}
1991 		vec++;
1992 
1993 		otx2_config_irq_coalescing(pf, qidx);
1994 
1995 		/* Enable CQ IRQ */
1996 		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1997 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1998 	}
1999 
2000 	otx2_set_cints_affinity(pf);
2001 
2002 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
2003 		otx2_enable_rxvlan(pf, true);
2004 
2005 	/* When reinitializing, re-enable timestamping if it was enabled before */
2006 	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
2007 		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
2008 		otx2_config_hw_tx_tstamp(pf, true);
2009 	}
2010 	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
2011 		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
2012 		otx2_config_hw_rx_tstamp(pf, true);
2013 	}
2014 
2015 	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
2016 	pf->flags &= ~OTX2_FLAG_PORT_UP;
2017 	/* 'intf_down' may be checked on any cpu */
2018 	smp_wmb();
2019 
2020 	/* Enable QoS configuration before starting tx queues */
2021 	otx2_qos_config_txschq(pf);
2022 
2023 	/* We have already received the link status notification, so handle it now */
2024 	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
2025 		otx2_handle_link_event(pf);
2026 
2027 	/* Install DMAC Filters */
2028 	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
2029 		otx2_dmacflt_reinstall_flows(pf);
2030 
2031 	otx2_tc_apply_ingress_police_rules(pf);
2032 
2033 	err = otx2_rxtx_enable(pf, true);
2034 	/* If an mbox communication error happens at this point then the
2035 	 * interface will end up in a down state while hardware mcam entries
2036 	 * are still enabled to receive packets. Hence disable the
2037 	 * packet I/O.
2038 	 */
2039 	if (err == -EIO)
2040 		goto err_disable_rxtx;
2041 	else if (err)
2042 		goto err_tx_stop_queues;
2043 
2044 	otx2_do_set_rx_mode(pf);
2045 
2046 	return 0;
2047 
2048 err_disable_rxtx:
2049 	otx2_rxtx_enable(pf, false);
2050 err_tx_stop_queues:
2051 	netif_tx_stop_all_queues(netdev);
2052 	netif_carrier_off(netdev);
2053 	pf->flags |= OTX2_FLAG_INTF_DOWN;
2054 err_free_cints:
2055 	otx2_free_cints(pf, qidx);
2056 	vec = pci_irq_vector(pf->pdev,
2057 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
2058 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
2059 	free_irq(vec, pf);
2060 err_disable_napi:
2061 	otx2_disable_napi(pf);
2062 	otx2_free_hw_resources(pf);
2063 err_free_mem:
2064 	otx2_free_queue_mem(qset);
2065 	return err;
2066 }
2067 EXPORT_SYMBOL(otx2_open);
2068 
2069 int otx2_stop(struct net_device *netdev)
2070 {
2071 	struct otx2_nic *pf = netdev_priv(netdev);
2072 	struct otx2_cq_poll *cq_poll = NULL;
2073 	struct otx2_qset *qset = &pf->qset;
2074 	struct otx2_rss_info *rss;
2075 	int qidx, vec, wrk;
2076 
2077 	/* If the DOWN flag is set, resources are already freed */
2078 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
2079 		return 0;
2080 
2081 	netif_carrier_off(netdev);
2082 	netif_tx_stop_all_queues(netdev);
2083 
2084 	pf->flags |= OTX2_FLAG_INTF_DOWN;
2085 	/* 'intf_down' may be checked on any cpu */
2086 	smp_wmb();
2087 
2088 	/* First stop packet Rx/Tx */
2089 	otx2_rxtx_enable(pf, false);
2090 
2091 	/* Clear RSS enable flag */
2092 	rss = &pf->hw.rss_info;
2093 	rss->enable = false;
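	/* Keep a user-configured RSS table across an ifdown/ifup cycle; free
	 * the default RSS context only when none was configured by the user.
	 */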
2094 	if (!netif_is_rxfh_configured(netdev))
2095 		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
2096 
2097 	/* Cleanup Queue IRQ */
2098 	vec = pci_irq_vector(pf->pdev,
2099 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
2100 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
2101 	free_irq(vec, pf);
2102 
2103 	/* Cleanup CQ NAPI and IRQ */
2104 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
2105 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
2106 		/* Disable interrupt */
2107 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
2108 
2109 		synchronize_irq(pci_irq_vector(pf->pdev, vec));
2110 
2111 		cq_poll = &qset->napi[qidx];
2112 		napi_synchronize(&cq_poll->napi);
2113 		vec++;
2114 	}
2115 
2116 	netif_tx_disable(netdev);
2117 
2118 	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
2119 		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
2120 	devm_kfree(pf->dev, pf->refill_wrk);
2121 
2122 	otx2_free_hw_resources(pf);
2123 	otx2_free_cints(pf, pf->hw.cint_cnt);
2124 	otx2_disable_napi(pf);
2125 
2126 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
2127 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
2128 
2129 	otx2_free_queue_mem(qset);
2130 	/* Do not clear RQ/SQ ringsize settings */
2131 	memset_startat(qset, 0, sqe_cnt);
2132 	return 0;
2133 }
2134 EXPORT_SYMBOL(otx2_stop);
2135 
2136 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
2137 {
2138 	struct otx2_nic *pf = netdev_priv(netdev);
2139 	int qidx = skb_get_queue_mapping(skb);
2140 	struct otx2_snd_queue *sq;
2141 	struct netdev_queue *txq;
2142 	int sq_idx;
2143 
2144 	/* XDP SQs are not mapped to TXQs, so advance qidx past them
2145 	 * to derive the correct SQ mapped with QOS.
2146 	 */
2147 	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
2148 
2149 	/* Check for minimum and maximum packet length */
2150 	if (skb->len <= ETH_HLEN ||
2151 	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
2152 		dev_kfree_skb(skb);
2153 		return NETDEV_TX_OK;
2154 	}
2155 
2156 	sq = &pf->qset.sq[sq_idx];
2157 	txq = netdev_get_tx_queue(netdev, qidx);
2158 
2159 	if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) {
2160 		netif_tx_stop_queue(txq);
2161 
2162 		/* Check again, in case SQBs got freed up */
2163 		smp_mb();
2164 		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
2165 							> sq->sqe_thresh)
2166 			netif_tx_wake_queue(txq);
2167 
2168 		return NETDEV_TX_BUSY;
2169 	}
2170 
2171 	return NETDEV_TX_OK;
2172 }
2173 
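/* Map the skb's TC classid to an HTB offload send queue; fall back to the
 * configured default class when the skb's major handle doesn't match.
 */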
2174 static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
2175 				     u16 htb_maj_id)
2176 {
2177 	u16 classid;
2178 
2179 	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
2180 		classid = TC_H_MIN(skb->priority);
2181 	else
2182 		classid = READ_ONCE(pf->qos.defcls);
2183 
2184 	if (!classid)
2185 		return 0;
2186 
2187 	return otx2_get_txq_by_classid(pf, classid);
2188 }
2189 
2190 u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
2191 		      struct net_device *sb_dev)
2192 {
2193 	struct otx2_nic *pf = netdev_priv(netdev);
2194 	bool qos_enabled;
2195 #ifdef CONFIG_DCB
2196 	u8 vlan_prio;
2197 #endif
2198 	int txq;
2199 
2200 	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
2201 	if (unlikely(qos_enabled)) {
2202 		/* This smp_load_acquire() pairs with smp_store_release() in
2203 		 * otx2_qos_root_add() called from htb offload root creation
2204 		 */
2205 		u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);
2206 
2207 		if (unlikely(htb_maj_id)) {
2208 			txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);
2209 			if (txq > 0)
2210 				return txq;
2211 			goto process_pfc;
2212 		}
2213 	}
2214 
2215 process_pfc:
2216 #ifdef CONFIG_DCB
2217 	if (!skb_vlan_tag_present(skb))
2218 		goto pick_tx;
2219 
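	/* PCP is the top 3 bits of the VLAN TCI; steer to the matching
	 * PFC queue only if one was allocated for that priority.
	 */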
2220 	vlan_prio = skb->vlan_tci >> 13;
2221 	if ((vlan_prio > pf->hw.tx_queues - 1) ||
2222 	    !pf->pfc_alloc_status[vlan_prio])
2223 		goto pick_tx;
2224 
2225 	return vlan_prio;
2226 
2227 pick_tx:
2228 #endif
2229 	txq = netdev_pick_tx(netdev, skb, NULL);
2230 	if (unlikely(qos_enabled))
2231 		return txq % pf->hw.tx_queues;
2232 
2233 	return txq;
2234 }
2235 EXPORT_SYMBOL(otx2_select_queue);
2236 
2237 static netdev_features_t otx2_fix_features(struct net_device *dev,
2238 					   netdev_features_t features)
2239 {
2240 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2241 		features |= NETIF_F_HW_VLAN_STAG_RX;
2242 	else
2243 		features &= ~NETIF_F_HW_VLAN_STAG_RX;
2244 
2245 	return features;
2246 }
2247 
2248 static void otx2_set_rx_mode(struct net_device *netdev)
2249 {
2250 	struct otx2_nic *pf = netdev_priv(netdev);
2251 
2252 	queue_work(pf->otx2_wq, &pf->rx_mode_work);
2253 }
2254 
2255 static void otx2_rx_mode_wrk_handler(struct work_struct *work)
2256 {
2257 	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
2258 
2259 	otx2_do_set_rx_mode(pf);
2260 }
2261 
2262 static int otx2_set_features(struct net_device *netdev,
2263 			     netdev_features_t features)
2264 {
2265 	netdev_features_t changed = features ^ netdev->features;
2266 	struct otx2_nic *pf = netdev_priv(netdev);
2267 
2268 	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
2269 		return otx2_cgx_config_loopback(pf,
2270 						features & NETIF_F_LOOPBACK);
2271 
2272 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
2273 		return otx2_enable_rxvlan(pf,
2274 					  features & NETIF_F_HW_VLAN_CTAG_RX);
2275 
2276 	return otx2_handle_ntuple_tc_features(netdev, features);
2277 }
2278 
2279 static void otx2_reset_task(struct work_struct *work)
2280 {
2281 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
2282 
2283 	if (!netif_running(pf->netdev))
2284 		return;
2285 
2286 	rtnl_lock();
2287 	otx2_stop(pf->netdev);
2288 	pf->reset_count++;
2289 	otx2_open(pf->netdev);
2290 	netif_trans_update(pf->netdev);
2291 	rtnl_unlock();
2292 }
2293 
2294 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
2295 {
2296 	struct msg_req *req;
2297 	int err;
2298 
2299 	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
2300 		return 0;
2301 
2302 	mutex_lock(&pfvf->mbox.lock);
2303 	if (enable)
2304 		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
2305 	else
2306 		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
2307 	if (!req) {
2308 		mutex_unlock(&pfvf->mbox.lock);
2309 		return -ENOMEM;
2310 	}
2311 
2312 	err = otx2_sync_mbox_msg(&pfvf->mbox);
2313 	if (err) {
2314 		mutex_unlock(&pfvf->mbox.lock);
2315 		return err;
2316 	}
2317 
2318 	mutex_unlock(&pfvf->mbox.lock);
2319 	if (enable)
2320 		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
2321 	else
2322 		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
2323 	return 0;
2324 }
2325 
2326 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
2327 {
2328 	struct msg_req *req;
2329 	int err;
2330 
2331 	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
2332 		return 0;
2333 
2334 	mutex_lock(&pfvf->mbox.lock);
2335 	if (enable)
2336 		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
2337 	else
2338 		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
2339 	if (!req) {
2340 		mutex_unlock(&pfvf->mbox.lock);
2341 		return -ENOMEM;
2342 	}
2343 
2344 	err = otx2_sync_mbox_msg(&pfvf->mbox);
2345 	if (err) {
2346 		mutex_unlock(&pfvf->mbox.lock);
2347 		return err;
2348 	}
2349 
2350 	mutex_unlock(&pfvf->mbox.lock);
2351 	if (enable)
2352 		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
2353 	else
2354 		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
2355 	return 0;
2356 }
2357 
2358 int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
2359 {
2360 	struct otx2_nic *pfvf = netdev_priv(netdev);
2361 	struct hwtstamp_config config;
2362 
2363 	if (!pfvf->ptp)
2364 		return -ENODEV;
2365 
2366 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2367 		return -EFAULT;
2368 
2369 	switch (config.tx_type) {
2370 	case HWTSTAMP_TX_OFF:
2371 		if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
2372 			pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
2373 
2374 		cancel_delayed_work(&pfvf->ptp->synctstamp_work);
2375 		otx2_config_hw_tx_tstamp(pfvf, false);
2376 		break;
2377 	case HWTSTAMP_TX_ONESTEP_SYNC:
2378 		if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
2379 			return -ERANGE;
2380 		pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
2381 		schedule_delayed_work(&pfvf->ptp->synctstamp_work,
2382 				      msecs_to_jiffies(500));
2383 		fallthrough;
2384 	case HWTSTAMP_TX_ON:
2385 		otx2_config_hw_tx_tstamp(pfvf, true);
2386 		break;
2387 	default:
2388 		return -ERANGE;
2389 	}
2390 
2391 	switch (config.rx_filter) {
2392 	case HWTSTAMP_FILTER_NONE:
2393 		otx2_config_hw_rx_tstamp(pfvf, false);
2394 		break;
2395 	case HWTSTAMP_FILTER_ALL:
2396 	case HWTSTAMP_FILTER_SOME:
2397 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2398 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2399 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2400 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2401 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2402 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2403 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2404 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2405 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2406 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2407 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2408 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2409 		otx2_config_hw_rx_tstamp(pfvf, true);
2410 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2411 		break;
2412 	default:
2413 		return -ERANGE;
2414 	}
2415 
2416 	memcpy(&pfvf->tstamp, &config, sizeof(config));
2417 
2418 	return copy_to_user(ifr->ifr_data, &config,
2419 			    sizeof(config)) ? -EFAULT : 0;
2420 }
2421 EXPORT_SYMBOL(otx2_config_hwtstamp);
2422 
2423 int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2424 {
2425 	struct otx2_nic *pfvf = netdev_priv(netdev);
2426 	struct hwtstamp_config *cfg = &pfvf->tstamp;
2427 
2428 	switch (cmd) {
2429 	case SIOCSHWTSTAMP:
2430 		return otx2_config_hwtstamp(netdev, req);
2431 	case SIOCGHWTSTAMP:
2432 		return copy_to_user(req->ifr_data, cfg,
2433 				    sizeof(*cfg)) ? -EFAULT : 0;
2434 	default:
2435 		return -EOPNOTSUPP;
2436 	}
2437 }
2438 EXPORT_SYMBOL(otx2_ioctl);
2439 
2440 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2441 {
2442 	struct npc_install_flow_req *req;
2443 	int err;
2444 
2445 	mutex_lock(&pf->mbox.lock);
2446 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2447 	if (!req) {
2448 		err = -ENOMEM;
2449 		goto out;
2450 	}
2451 
2452 	ether_addr_copy(req->packet.dmac, mac);
2453 	eth_broadcast_addr((u8 *)&req->mask.dmac);
2454 	req->features = BIT_ULL(NPC_DMAC);
2455 	req->channel = pf->hw.rx_chan_base;
2456 	req->intf = NIX_INTF_RX;
2457 	req->default_rule = 1;
2458 	req->append = 1;
2459 	req->vf = vf + 1;
2460 	req->op = NIX_RX_ACTION_DEFAULT;
2461 
2462 	err = otx2_sync_mbox_msg(&pf->mbox);
2463 out:
2464 	mutex_unlock(&pf->mbox.lock);
2465 	return err;
2466 }
2467 
2468 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2469 {
2470 	struct otx2_nic *pf = netdev_priv(netdev);
2471 	struct pci_dev *pdev = pf->pdev;
2472 	struct otx2_vf_config *config;
2473 	int ret;
2474 
2475 	if (!netif_running(netdev))
2476 		return -EAGAIN;
2477 
2478 	if (vf >= pf->total_vfs)
2479 		return -EINVAL;
2480 
2481 	if (!is_valid_ether_addr(mac))
2482 		return -EINVAL;
2483 
2484 	config = &pf->vf_configs[vf];
2485 	ether_addr_copy(config->mac, mac);
2486 
2487 	ret = otx2_do_set_vf_mac(pf, vf, mac);
2488 	if (ret == 0)
2489 		dev_info(&pdev->dev,
2490 			 "Load/Reload VF driver\n");
2491 
2492 	return ret;
2493 }
2494 
2495 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
2496 			       __be16 proto)
2497 {
2498 	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
2499 	struct nix_vtag_config_rsp *vtag_rsp;
2500 	struct npc_delete_flow_req *del_req;
2501 	struct nix_vtag_config *vtag_req;
2502 	struct npc_install_flow_req *req;
2503 	struct otx2_vf_config *config;
2504 	int err = 0;
2505 	u32 idx;
2506 
2507 	config = &pf->vf_configs[vf];
2508 
2509 	if (!vlan && !config->vlan)
2510 		goto out;
2511 
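	/* Free any previously installed Tx VTAG entry first, then either delete
	 * the per-VF VLAN MCAM flows (vlan == 0) or install fresh Rx/Tx flows
	 * with a newly allocated Tx VTAG for the requested VLAN.
	 */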
2512 	mutex_lock(&pf->mbox.lock);
2513 
2514 	/* free old tx vtag entry */
2515 	if (config->vlan) {
2516 		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2517 		if (!vtag_req) {
2518 			err = -ENOMEM;
2519 			goto out;
2520 		}
2521 		vtag_req->cfg_type = 0;
2522 		vtag_req->tx.free_vtag0 = 1;
2523 		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
2524 
2525 		err = otx2_sync_mbox_msg(&pf->mbox);
2526 		if (err)
2527 			goto out;
2528 	}
2529 
2530 	if (!vlan && config->vlan) {
2531 		/* rx */
2532 		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2533 		if (!del_req) {
2534 			err = -ENOMEM;
2535 			goto out;
2536 		}
2537 		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2538 		del_req->entry =
2539 			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2540 		err = otx2_sync_mbox_msg(&pf->mbox);
2541 		if (err)
2542 			goto out;
2543 
2544 		/* tx */
2545 		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2546 		if (!del_req) {
2547 			err = -ENOMEM;
2548 			goto out;
2549 		}
2550 		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2551 		del_req->entry =
2552 			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2553 		err = otx2_sync_mbox_msg(&pf->mbox);
2554 
2555 		goto out;
2556 	}
2557 
2558 	/* rx */
2559 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2560 	if (!req) {
2561 		err = -ENOMEM;
2562 		goto out;
2563 	}
2564 
2565 	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2566 	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2567 	req->packet.vlan_tci = htons(vlan);
2568 	req->mask.vlan_tci = htons(VLAN_VID_MASK);
2569 	/* AF fills in the destination MAC address */
2570 	eth_broadcast_addr((u8 *)&req->mask.dmac);
2571 	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2572 	req->channel = pf->hw.rx_chan_base;
2573 	req->intf = NIX_INTF_RX;
2574 	req->vf = vf + 1;
2575 	req->op = NIX_RX_ACTION_DEFAULT;
2576 	req->vtag0_valid = true;
2577 	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2578 	req->set_cntr = 1;
2579 
2580 	err = otx2_sync_mbox_msg(&pf->mbox);
2581 	if (err)
2582 		goto out;
2583 
2584 	/* tx */
2585 	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2586 	if (!vtag_req) {
2587 		err = -ENOMEM;
2588 		goto out;
2589 	}
2590 
2591 	/* configure tx vtag params */
2592 	vtag_req->vtag_size = VTAGSIZE_T4;
2593 	vtag_req->cfg_type = 0; /* tx vlan cfg */
2594 	vtag_req->tx.cfg_vtag0 = 1;
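	/* Tx VTAG0 to insert: TPID in bits 31:16, VLAN ID in bits 15:0 */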
2595 	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2596 
2597 	err = otx2_sync_mbox_msg(&pf->mbox);
2598 	if (err)
2599 		goto out;
2600 
2601 	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2602 			(&pf->mbox.mbox, 0, &vtag_req->hdr);
2603 	if (IS_ERR(vtag_rsp)) {
2604 		err = PTR_ERR(vtag_rsp);
2605 		goto out;
2606 	}
2607 	config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2608 
2609 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2610 	if (!req) {
2611 		err = -ENOMEM;
2612 		goto out;
2613 	}
2614 
2615 	eth_zero_addr((u8 *)&req->mask.dmac);
2616 	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2617 	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2618 	req->features = BIT_ULL(NPC_DMAC);
2619 	req->channel = pf->hw.tx_chan_base;
2620 	req->intf = NIX_INTF_TX;
2621 	req->vf = vf + 1;
2622 	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2623 	req->vtag0_def = vtag_rsp->vtag0_idx;
2624 	req->vtag0_op = VTAG_INSERT;
2625 	req->set_cntr = 1;
2626 
2627 	err = otx2_sync_mbox_msg(&pf->mbox);
2628 out:
2629 	config->vlan = vlan;
2630 	mutex_unlock(&pf->mbox.lock);
2631 	return err;
2632 }
2633 
2634 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
2635 			    __be16 proto)
2636 {
2637 	struct otx2_nic *pf = netdev_priv(netdev);
2638 	struct pci_dev *pdev = pf->pdev;
2639 
2640 	if (!netif_running(netdev))
2641 		return -EAGAIN;
2642 
2643 	if (vf >= pci_num_vf(pdev))
2644 		return -EINVAL;
2645 
2646 	/* qos is currently unsupported */
2647 	if (vlan >= VLAN_N_VID || qos)
2648 		return -EINVAL;
2649 
2650 	if (proto != htons(ETH_P_8021Q))
2651 		return -EPROTONOSUPPORT;
2652 
2653 	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
2654 		return -EOPNOTSUPP;
2655 
2656 	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
2657 }
2658 
2659 static int otx2_get_vf_config(struct net_device *netdev, int vf,
2660 			      struct ifla_vf_info *ivi)
2661 {
2662 	struct otx2_nic *pf = netdev_priv(netdev);
2663 	struct pci_dev *pdev = pf->pdev;
2664 	struct otx2_vf_config *config;
2665 
2666 	if (!netif_running(netdev))
2667 		return -EAGAIN;
2668 
2669 	if (vf >= pci_num_vf(pdev))
2670 		return -EINVAL;
2671 
2672 	config = &pf->vf_configs[vf];
2673 	ivi->vf = vf;
2674 	ether_addr_copy(ivi->mac, config->mac);
2675 	ivi->vlan = config->vlan;
2676 	ivi->trusted = config->trusted;
2677 
2678 	return 0;
2679 }
2680 
2681 static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
2682 			    int qidx)
2683 {
2684 	struct page *page;
2685 	u64 dma_addr;
2686 	int err = 0;
2687 
2688 	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
2689 				     offset_in_page(xdpf->data), xdpf->len,
2690 				     DMA_TO_DEVICE);
2691 	if (dma_mapping_error(pf->dev, dma_addr))
2692 		return -ENOMEM;
2693 
2694 	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
2695 	if (!err) {
2696 		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
2697 		page = virt_to_page(xdpf->data);
2698 		put_page(page);
2699 		return -ENOMEM;
2700 	}
2701 	return 0;
2702 }
2703 
2704 static int otx2_xdp_xmit(struct net_device *netdev, int n,
2705 			 struct xdp_frame **frames, u32 flags)
2706 {
2707 	struct otx2_nic *pf = netdev_priv(netdev);
2708 	int qidx = smp_processor_id();
2709 	struct otx2_snd_queue *sq;
2710 	int drops = 0, i;
2711 
2712 	if (!netif_running(netdev))
2713 		return -ENETDOWN;
2714 
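	/* XDP TX queues follow the regular TX queues; the sending CPU id
	 * is used to pick the XDP send queue.
	 */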
2715 	qidx += pf->hw.tx_queues;
2716 	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
2717 
2718 	/* Abort xmit if the XDP send queue is not set up */
2719 	if (unlikely(!sq))
2720 		return -ENXIO;
2721 
2722 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2723 		return -EINVAL;
2724 
2725 	for (i = 0; i < n; i++) {
2726 		struct xdp_frame *xdpf = frames[i];
2727 		int err;
2728 
2729 		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
2730 		if (err)
2731 			drops++;
2732 	}
2733 	return n - drops;
2734 }
2735 
2736 static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
2737 {
2738 	struct net_device *dev = pf->netdev;
2739 	bool if_up = netif_running(pf->netdev);
2740 	struct bpf_prog *old_prog;
2741 
2742 	if (prog && dev->mtu > MAX_XDP_MTU) {
2743 		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
2744 		return -EOPNOTSUPP;
2745 	}
2746 
2747 	if (if_up)
2748 		otx2_stop(pf->netdev);
2749 
2750 	old_prog = xchg(&pf->xdp_prog, prog);
2751 
2752 	if (old_prog)
2753 		bpf_prog_put(old_prog);
2754 
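	/* Bump the program's refcount so it stays alive for all Rx queues
	 * that will run it; the attach itself already took one reference.
	 */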
2755 	if (pf->xdp_prog)
2756 		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
2757 
2758 	/* Network stack and XDP share the same rx queues.
2759 	 * Use separate tx queues for XDP and network stack.
2760 	 */
2761 	if (pf->xdp_prog) {
2762 		pf->hw.xdp_queues = pf->hw.rx_queues;
2763 		xdp_features_set_redirect_target(dev, false);
2764 	} else {
2765 		pf->hw.xdp_queues = 0;
2766 		xdp_features_clear_redirect_target(dev);
2767 	}
2768 
2769 	if (if_up)
2770 		otx2_open(pf->netdev);
2771 
2772 	return 0;
2773 }
2774 
2775 static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2776 {
2777 	struct otx2_nic *pf = netdev_priv(netdev);
2778 
2779 	switch (xdp->command) {
2780 	case XDP_SETUP_PROG:
2781 		return otx2_xdp_setup(pf, xdp->prog);
2782 	default:
2783 		return -EINVAL;
2784 	}
2785 }
2786 
2787 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2788 				   int req_perm)
2789 {
2790 	struct set_vf_perm *req;
2791 	int rc;
2792 
2793 	mutex_lock(&pf->mbox.lock);
2794 	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
2795 	if (!req) {
2796 		rc = -ENOMEM;
2797 		goto out;
2798 	}
2799 
2800 	/* Let AF reset VF permissions as SR-IOV is being disabled */
2801 	if (req_perm == OTX2_RESET_VF_PERM) {
2802 		req->flags |= RESET_VF_PERM;
2803 	} else if (req_perm == OTX2_TRUSTED_VF) {
2804 		if (pf->vf_configs[vf].trusted)
2805 			req->flags |= VF_TRUSTED;
2806 	}
2807 
2808 	req->vf = vf;
2809 	rc = otx2_sync_mbox_msg(&pf->mbox);
2810 out:
2811 	mutex_unlock(&pf->mbox.lock);
2812 	return rc;
2813 }
2814 
2815 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
2816 				 bool enable)
2817 {
2818 	struct otx2_nic *pf = netdev_priv(netdev);
2819 	struct pci_dev *pdev = pf->pdev;
2820 	int rc;
2821 
2822 	if (vf >= pci_num_vf(pdev))
2823 		return -EINVAL;
2824 
2825 	if (pf->vf_configs[vf].trusted == enable)
2826 		return 0;
2827 
2828 	pf->vf_configs[vf].trusted = enable;
2829 	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
2830 
2831 	if (rc) {
2832 		pf->vf_configs[vf].trusted = !enable;
2833 	} else {
2834 		netdev_info(pf->netdev, "VF %d is %strusted\n",
2835 			    vf, enable ? "" : "not ");
2836 		otx2_set_rx_mode(netdev);
2837 	}
2838 
2839 	return rc;
2840 }
2841 
2842 static const struct net_device_ops otx2_netdev_ops = {
2843 	.ndo_open		= otx2_open,
2844 	.ndo_stop		= otx2_stop,
2845 	.ndo_start_xmit		= otx2_xmit,
2846 	.ndo_select_queue	= otx2_select_queue,
2847 	.ndo_fix_features	= otx2_fix_features,
2848 	.ndo_set_mac_address    = otx2_set_mac_address,
2849 	.ndo_change_mtu		= otx2_change_mtu,
2850 	.ndo_set_rx_mode	= otx2_set_rx_mode,
2851 	.ndo_set_features	= otx2_set_features,
2852 	.ndo_tx_timeout		= otx2_tx_timeout,
2853 	.ndo_get_stats64	= otx2_get_stats64,
2854 	.ndo_eth_ioctl		= otx2_ioctl,
2855 	.ndo_set_vf_mac		= otx2_set_vf_mac,
2856 	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
2857 	.ndo_get_vf_config	= otx2_get_vf_config,
2858 	.ndo_bpf		= otx2_xdp,
2859 	.ndo_xdp_xmit           = otx2_xdp_xmit,
2860 	.ndo_setup_tc		= otx2_setup_tc,
2861 	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
2862 };
2863 
2864 int otx2_wq_init(struct otx2_nic *pf)
2865 {
2866 	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2867 	if (!pf->otx2_wq)
2868 		return -ENOMEM;
2869 
2870 	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
2871 	INIT_WORK(&pf->reset_task, otx2_reset_task);
2872 	return 0;
2873 }
2874 
2875 int otx2_check_pf_usable(struct otx2_nic *nic)
2876 {
2877 	u64 rev;
2878 
2879 	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
2880 	rev = (rev >> 12) & 0xFF;
2881 	/* Check if AF has set up the revision for the RVUM block,
2882 	 * otherwise this driver probe should be deferred
2883 	 * until the AF driver comes up.
2884 	 */
2885 	if (!rev) {
2886 		dev_warn(nic->dev,
2887 			 "AF is not initialized, deferring probe\n");
2888 		return -EPROBE_DEFER;
2889 	}
2890 	return 0;
2891 }
2892 
2893 int otx2_realloc_msix_vectors(struct otx2_nic *pf)
2894 {
2895 	struct otx2_hw *hw = &pf->hw;
2896 	int num_vec, err;
2897 
2898 	/* NPA interrupts are not registered, so alloc only
2899 	 * up to the NIX vector offset.
2900 	 */
2901 	num_vec = hw->nix_msixoff;
2902 	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
2903 
2904 	otx2_disable_mbox_intr(pf);
2905 	pci_free_irq_vectors(hw->pdev);
2906 	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
2907 	if (err < 0) {
2908 		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
2909 			__func__, num_vec);
2910 		return err;
2911 	}
2912 
2913 	return otx2_register_mbox_intr(pf, false);
2914 }
2915 EXPORT_SYMBOL(otx2_realloc_msix_vectors);
2916 
2917 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2918 {
2919 	int i;
2920 
2921 	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2922 				      sizeof(struct otx2_vf_config),
2923 				      GFP_KERNEL);
2924 	if (!pf->vf_configs)
2925 		return -ENOMEM;
2926 
2927 	for (i = 0; i < pf->total_vfs; i++) {
2928 		pf->vf_configs[i].pf = pf;
2929 		pf->vf_configs[i].intf_down = true;
2930 		pf->vf_configs[i].trusted = false;
2931 		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2932 				  otx2_vf_link_event_task);
2933 	}
2934 
2935 	return 0;
2936 }
2937 
2938 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2939 {
2940 	int i;
2941 
2942 	if (!pf->vf_configs)
2943 		return;
2944 
2945 	for (i = 0; i < pf->total_vfs; i++) {
2946 		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2947 		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2948 	}
2949 }
2950 
2951 int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
2952 {
2953 	struct device *dev = &pdev->dev;
2954 	struct otx2_hw *hw = &pf->hw;
2955 	int num_vec, err;
2956 
2957 	num_vec = pci_msix_vec_count(pdev);
2958 	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
2959 					  GFP_KERNEL);
2960 	if (!hw->irq_name)
2961 		return -ENOMEM;
2962 
2963 	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
2964 					 sizeof(cpumask_var_t), GFP_KERNEL);
2965 	if (!hw->affinity_mask)
2966 		return -ENOMEM;
2967 
2968 	/* Map CSRs */
2969 	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
2970 	if (!pf->reg_base) {
2971 		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
2972 		return -ENOMEM;
2973 	}
2974 
2975 	err = otx2_check_pf_usable(pf);
2976 	if (err)
2977 		return err;
2978 
2979 	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
2980 				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
2981 	if (err < 0) {
2982 		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
2983 			__func__, num_vec);
2984 		return err;
2985 	}
2986 
2987 	otx2_setup_dev_hw_settings(pf);
2988 
2989 	/* Init PF <=> AF mailbox stuff */
2990 	err = otx2_pfaf_mbox_init(pf);
2991 	if (err)
2992 		goto err_free_irq_vectors;
2993 
2994 	/* Register mailbox interrupt */
2995 	err = otx2_register_mbox_intr(pf, true);
2996 	if (err)
2997 		goto err_mbox_destroy;
2998 
2999 	/* Request AF to attach NPA and NIX LFs to this PF.
3000 	 * NIX and NPA LFs are needed for this PF to function as a NIC.
3001 	 */
3002 	err = otx2_attach_npa_nix(pf);
3003 	if (err)
3004 		goto err_disable_mbox_intr;
3005 
3006 	err = otx2_realloc_msix_vectors(pf);
3007 	if (err)
3008 		goto err_detach_rsrc;
3009 
3010 	err = cn10k_lmtst_init(pf);
3011 	if (err)
3012 		goto err_detach_rsrc;
3013 
3014 	return 0;
3015 
3016 err_detach_rsrc:
3017 	if (pf->hw.lmt_info)
3018 		free_percpu(pf->hw.lmt_info);
3019 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
3020 		qmem_free(pf->dev, pf->dync_lmt);
3021 	otx2_detach_resources(&pf->mbox);
3022 err_disable_mbox_intr:
3023 	otx2_disable_mbox_intr(pf);
3024 err_mbox_destroy:
3025 	otx2_pfaf_mbox_destroy(pf);
3026 err_free_irq_vectors:
3027 	pci_free_irq_vectors(hw->pdev);
3028 
3029 	return err;
3030 }
3031 EXPORT_SYMBOL(otx2_init_rsrc);
3032 
3033 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3034 {
3035 	struct device *dev = &pdev->dev;
3036 	int err, qcount, qos_txqs;
3037 	struct net_device *netdev;
3038 	struct otx2_nic *pf;
3039 	struct otx2_hw *hw;
3040 
3041 	err = pcim_enable_device(pdev);
3042 	if (err) {
3043 		dev_err(dev, "Failed to enable PCI device\n");
3044 		return err;
3045 	}
3046 
3047 	err = pci_request_regions(pdev, DRV_NAME);
3048 	if (err) {
3049 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
3050 		return err;
3051 	}
3052 
3053 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3054 	if (err) {
3055 		dev_err(dev, "DMA mask config failed, abort\n");
3056 		goto err_release_regions;
3057 	}
3058 
3059 	pci_set_master(pdev);
3060 
3061 	/* Set number of queues */
3062 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
3063 	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
3064 
3065 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
3066 	if (!netdev) {
3067 		err = -ENOMEM;
3068 		goto err_release_regions;
3069 	}
3070 
3071 	pci_set_drvdata(pdev, netdev);
3072 	SET_NETDEV_DEV(netdev, &pdev->dev);
3073 	pf = netdev_priv(netdev);
3074 	pf->netdev = netdev;
3075 	pf->pdev = pdev;
3076 	pf->dev = dev;
3077 	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
3078 	pf->flags |= OTX2_FLAG_INTF_DOWN;
3079 
3080 	hw = &pf->hw;
3081 	hw->pdev = pdev;
3082 	hw->rx_queues = qcount;
3083 	hw->tx_queues = qcount;
3084 	hw->non_qos_queues = qcount;
3085 	hw->max_queues = qcount;
3086 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
3087 	/* Use CQE of 128 byte descriptor size by default */
3088 	hw->xqe_size = 128;
3089 
3090 	err = otx2_init_rsrc(pdev, pf);
3091 	if (err)
3092 		goto err_free_netdev;
3093 
3094 	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
3095 	if (err)
3096 		goto err_detach_rsrc;
3097 
3098 	/* Assign default mac address */
3099 	otx2_get_mac_from_af(netdev);
3100 
3101 	/* Don't check for error; proceed without PTP if init fails */
3102 	otx2_ptp_init(pf);
3103 
3104 	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
3105 	 * HW allocates buffer pointer from stack and uses it for DMA'ing
3106 	 * ingress packet. In some scenarios HW can free back allocated buffer
3107 	 * pointers to pool. This makes it impossible for SW to maintain a
3108 	 * parallel list where physical addresses of buffer pointers (IOVAs)
3109 	 * given to HW can be saved for later reference.
3110 	 *
3111 	 * So the only way to convert Rx packet's buffer address is to use
3112 	 * IOMMU's iova_to_phys() handler which translates the address by
3113 	 * walking through the translation tables.
3114 	 */
3115 	pf->iommu_domain = iommu_get_domain_for_dev(dev);
3116 
3117 	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3118 			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
3119 			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3120 			       NETIF_F_GSO_UDP_L4);
3121 	netdev->features |= netdev->hw_features;
3122 
3123 	err = otx2_mcam_flow_init(pf);
3124 	if (err)
3125 		goto err_ptp_destroy;
3126 
3127 	err = cn10k_mcs_init(pf);
3128 	if (err)
3129 		goto err_del_mcam_entries;
3130 
3131 	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
3132 		netdev->hw_features |= NETIF_F_NTUPLE;
3133 
3134 	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
3135 		netdev->priv_flags |= IFF_UNICAST_FLT;
3136 
3137 	/* Support TSO on tag interface */
3138 	netdev->vlan_features |= netdev->features;
3139 	netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
3140 				NETIF_F_HW_VLAN_STAG_TX;
3141 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
3142 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
3143 				       NETIF_F_HW_VLAN_STAG_RX;
3144 	netdev->features |= netdev->hw_features;
3145 
3146 	/* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
3147 	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
3148 		netdev->hw_features |= NETIF_F_HW_TC;
3149 
3150 	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
3151 
3152 	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
3153 	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
3154 
3155 	netdev->netdev_ops = &otx2_netdev_ops;
3156 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
3157 
3158 	netdev->min_mtu = OTX2_MIN_MTU;
3159 	netdev->max_mtu = otx2_get_max_mtu(pf);
3160 	hw->max_mtu = netdev->max_mtu;
3161 
3162 	/* reset CGX/RPM MAC stats */
3163 	otx2_reset_mac_stats(pf);
3164 
3165 	err = register_netdev(netdev);
3166 	if (err) {
3167 		dev_err(dev, "Failed to register netdevice\n");
3168 		goto err_mcs_free;
3169 	}
3170 
3171 	err = otx2_wq_init(pf);
3172 	if (err)
3173 		goto err_unreg_netdev;
3174 
3175 	otx2_set_ethtool_ops(netdev);
3176 
3177 	err = otx2_init_tc(pf);
3178 	if (err)
3179 		goto err_mcam_flow_del;
3180 
3181 	err = otx2_register_dl(pf);
3182 	if (err)
3183 		goto err_mcam_flow_del;
3184 
3185 	/* Initialize SR-IOV resources */
3186 	err = otx2_sriov_vfcfg_init(pf);
3187 	if (err)
3188 		goto err_pf_sriov_init;
3189 
3190 	/* Enable link notifications */
3191 	otx2_cgx_config_linkevents(pf, true);
3192 
3193 #ifdef CONFIG_DCB
3194 	err = otx2_dcbnl_set_ops(netdev);
3195 	if (err)
3196 		goto err_pf_sriov_init;
3197 #endif
3198 
3199 	otx2_qos_init(pf, qos_txqs);
3200 
3201 	return 0;
3202 
3203 err_pf_sriov_init:
3204 	otx2_shutdown_tc(pf);
3205 err_mcam_flow_del:
3206 	otx2_mcam_flow_del(pf);
3207 err_unreg_netdev:
3208 	unregister_netdev(netdev);
3209 err_mcs_free:
3210 	cn10k_mcs_free(pf);
3211 err_del_mcam_entries:
3212 	otx2_mcam_flow_del(pf);
3213 err_ptp_destroy:
3214 	otx2_ptp_destroy(pf);
3215 err_detach_rsrc:
3216 	if (pf->hw.lmt_info)
3217 		free_percpu(pf->hw.lmt_info);
3218 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
3219 		qmem_free(pf->dev, pf->dync_lmt);
3220 	otx2_detach_resources(&pf->mbox);
3221 	otx2_disable_mbox_intr(pf);
3222 	otx2_pfaf_mbox_destroy(pf);
3223 	pci_free_irq_vectors(hw->pdev);
3224 err_free_netdev:
3225 	pci_set_drvdata(pdev, NULL);
3226 	free_netdev(netdev);
3227 err_release_regions:
3228 	pci_release_regions(pdev);
3229 	return err;
3230 }
3231 
3232 static void otx2_vf_link_event_task(struct work_struct *work)
3233 {
3234 	struct otx2_vf_config *config;
3235 	struct cgx_link_info_msg *req;
3236 	struct mbox_msghdr *msghdr;
3237 	struct delayed_work *dwork;
3238 	struct otx2_nic *pf;
3239 	int vf_idx;
3240 
3241 	config = container_of(work, struct otx2_vf_config,
3242 			      link_event_work.work);
3243 	vf_idx = config - config->pf->vf_configs;
3244 	pf = config->pf;
3245 
3246 	if (config->intf_down)
3247 		return;
3248 
3249 	mutex_lock(&pf->mbox.lock);
3250 
3251 	dwork = &config->link_event_work;
3252 
3253 	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
3254 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
3255 		mutex_unlock(&pf->mbox.lock);
3256 		return;
3257 	}
3258 
3259 	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
3260 					 sizeof(*req), sizeof(struct msg_rsp));
3261 	if (!msghdr) {
3262 		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
3263 		mutex_unlock(&pf->mbox.lock);
3264 		return;
3265 	}
3266 
3267 	req = (struct cgx_link_info_msg *)msghdr;
3268 	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
3269 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
3270 	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
3271 
3272 	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
3273 
3274 	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
3275 
3276 	mutex_unlock(&pf->mbox.lock);
3277 }
3278 
3279 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
3280 {
3281 	struct net_device *netdev = pci_get_drvdata(pdev);
3282 	struct otx2_nic *pf = netdev_priv(netdev);
3283 	int ret;
3284 
3285 	/* Init PF <=> VF mailbox stuff */
3286 	ret = otx2_pfvf_mbox_init(pf, numvfs);
3287 	if (ret)
3288 		return ret;
3289 
3290 	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
3291 	if (ret)
3292 		goto free_mbox;
3293 
3294 	ret = otx2_pf_flr_init(pf, numvfs);
3295 	if (ret)
3296 		goto free_intr;
3297 
3298 	ret = otx2_register_flr_me_intr(pf, numvfs);
3299 	if (ret)
3300 		goto free_flr;
3301 
3302 	ret = pci_enable_sriov(pdev, numvfs);
3303 	if (ret)
3304 		goto free_flr_intr;
3305 
3306 	return numvfs;
3307 free_flr_intr:
3308 	otx2_disable_flr_me_intr(pf);
3309 free_flr:
3310 	otx2_flr_wq_destroy(pf);
3311 free_intr:
3312 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
3313 free_mbox:
3314 	otx2_pfvf_mbox_destroy(pf);
3315 	return ret;
3316 }
3317 
3318 static int otx2_sriov_disable(struct pci_dev *pdev)
3319 {
3320 	struct net_device *netdev = pci_get_drvdata(pdev);
3321 	struct otx2_nic *pf = netdev_priv(netdev);
3322 	int numvfs = pci_num_vf(pdev);
3323 
3324 	if (!numvfs)
3325 		return 0;
3326 
3327 	pci_disable_sriov(pdev);
3328 
3329 	otx2_disable_flr_me_intr(pf);
3330 	otx2_flr_wq_destroy(pf);
3331 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
3332 	otx2_pfvf_mbox_destroy(pf);
3333 
3334 	return 0;
3335 }
3336 
3337 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
3338 {
3339 	if (numvfs == 0)
3340 		return otx2_sriov_disable(pdev);
3341 	else
3342 		return otx2_sriov_enable(pdev, numvfs);
3343 }
3344 
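/* Ask AF to flush this LF's NIX Rx/Tx and NPA state cached in NDC back to
 * memory; done during driver removal before resources are detached.
 */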
3345 static void otx2_ndc_sync(struct otx2_nic *pf)
3346 {
3347 	struct mbox *mbox = &pf->mbox;
3348 	struct ndc_sync_op *req;
3349 
3350 	mutex_lock(&mbox->lock);
3351 
3352 	req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
3353 	if (!req) {
3354 		mutex_unlock(&mbox->lock);
3355 		return;
3356 	}
3357 
3358 	req->nix_lf_tx_sync = 1;
3359 	req->nix_lf_rx_sync = 1;
3360 	req->npa_lf_sync = 1;
3361 
3362 	if (otx2_sync_mbox_msg(mbox))
3363 		dev_err(pf->dev, "NDC sync operation failed\n");
3364 
3365 	mutex_unlock(&mbox->lock);
3366 }
3367 
3368 static void otx2_remove(struct pci_dev *pdev)
3369 {
3370 	struct net_device *netdev = pci_get_drvdata(pdev);
3371 	struct otx2_nic *pf;
3372 
3373 	if (!netdev)
3374 		return;
3375 
3376 	pf = netdev_priv(netdev);
3377 
3378 	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
3379 
3380 	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
3381 		otx2_config_hw_tx_tstamp(pf, false);
3382 	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
3383 		otx2_config_hw_rx_tstamp(pf, false);
3384 
3385 	/* Disable 802.3x pause frames */
3386 	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
3387 	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
3388 		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
3389 		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
3390 		otx2_config_pause_frm(pf);
3391 	}
3392 
3393 #ifdef CONFIG_DCB
3394 	/* Disable PFC config */
3395 	if (pf->pfc_en) {
3396 		pf->pfc_en = 0;
3397 		otx2_config_priority_flow_ctrl(pf);
3398 	}
3399 #endif
3400 	cancel_work_sync(&pf->reset_task);
3401 	/* Disable link notifications */
3402 	otx2_cgx_config_linkevents(pf, false);
3403 
3404 	otx2_unregister_dl(pf);
3405 	unregister_netdev(netdev);
3406 	cn10k_mcs_free(pf);
3407 	otx2_sriov_disable(pf->pdev);
3408 	otx2_sriov_vfcfg_cleanup(pf);
3409 	if (pf->otx2_wq)
3410 		destroy_workqueue(pf->otx2_wq);
3411 
3412 	otx2_ptp_destroy(pf);
3413 	otx2_mcam_flow_del(pf);
3414 	otx2_shutdown_tc(pf);
3415 	otx2_shutdown_qos(pf);
3416 	otx2_ndc_sync(pf);
3417 	otx2_detach_resources(&pf->mbox);
3418 	if (pf->hw.lmt_info)
3419 		free_percpu(pf->hw.lmt_info);
3420 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
3421 		qmem_free(pf->dev, pf->dync_lmt);
3422 	otx2_disable_mbox_intr(pf);
3423 	otx2_pfaf_mbox_destroy(pf);
3424 	pci_free_irq_vectors(pf->pdev);
3425 	pci_set_drvdata(pdev, NULL);
3426 	free_netdev(netdev);
3427 
3428 	pci_release_regions(pdev);
3429 }
3430 
3431 static struct pci_driver otx2_pf_driver = {
3432 	.name = DRV_NAME,
3433 	.id_table = otx2_pf_id_table,
3434 	.probe = otx2_probe,
3435 	.shutdown = otx2_remove,
3436 	.remove = otx2_remove,
3437 	.sriov_configure = otx2_sriov_configure
3438 };
3439 
3440 static int __init otx2_rvupf_init_module(void)
3441 {
3442 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3443 
3444 	return pci_register_driver(&otx2_pf_driver);
3445 }
3446 
3447 static void __exit otx2_rvupf_cleanup_module(void)
3448 {
3449 	pci_unregister_driver(&otx2_pf_driver);
3450 }
3451 
3452 module_init(otx2_rvupf_init_module);
3453 module_exit(otx2_rvupf_cleanup_module);
3454