xref: /freebsd/sys/dev/oce/oce_if.c (revision f5f7c05209ca2c3748fd8b27c5e80ffad49120eb)
/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);

/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table; each entry packs (vendor id << 16) | device id */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}

static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->rss_enable	 = oce_enable_rss;
	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;
#ifdef DEV_NETMAP
#endif /* DEV_NETMAP */

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;
}

static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}

static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			sc->promisc = TRUE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			sc->promisc = FALSE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}

static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);
}

static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

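	/*
	 * Pick a WQ for this packet: if the stack stamped the mbuf with a
	 * flow id, use it so a given flow always maps to the same ring;
	 * otherwise fall back to queue 0.
	 */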
	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	if (TRY_LOCK(&wq->tx_lock)) {
		status = oce_multiq_transmit(ifp, m, wq);
		UNLOCK(&wq->tx_lock);
	} else {
		status = drbr_enqueue(ifp, wq->br, m);
	}
	return status;
}

static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}


/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
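	/*
	 * Drain the event queue: consume and clear valid EQEs until an
	 * unset one is seen, counting how many entries were handled.
	 */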
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC, but don't arm the CQs yet */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all CQs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
	return;
}

static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

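	/*
	 * With RSS, each vector is meant to serve one RSS RX ring / TX ring
	 * pair, so request enough vectors to cover whichever set is larger
	 * (nrqs counts the extra default, non-RSS RX queue).
	 */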
	if (sc->rss_enable)
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale the number of queues to the interrupts we actually got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

	return FILTER_HANDLED;
}

static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;
}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);
}


/******************************************************************************
*			  Media callbacks functions			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}


767  *			  Transmit routines functions			     *
768  *****************************************************************************/
769 
770 static int
771 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
772 {
773 	int rc = 0, i, retry_cnt = 0;
774 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
775 	struct mbuf *m, *m_temp;
776 	struct oce_wq *wq = sc->wq[wq_index];
777 	struct oce_packet_desc *pd;
778 	uint32_t out;
779 	struct oce_nic_hdr_wqe *nichdr;
780 	struct oce_nic_frag_wqe *nicfrag;
781 	int num_wqes;
782 	uint32_t reg_value;
783 
784 	m = *mpp;
785 	if (!m)
786 		return EINVAL;
787 
788 	if (!(m->m_flags & M_PKTHDR)) {
789 		rc = ENXIO;
790 		goto free_ret;
791 	}
792 
793 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
794 		/* consolidate packet buffers for TSO/LSO segment offload */
795 #if defined(INET6) || defined(INET)
796 		m = oce_tso_setup(sc, mpp);
797 #else
798 		m = NULL;
799 #endif
800 		if (m == NULL) {
801 			rc = ENXIO;
802 			goto free_ret;
803 		}
804 	}
805 
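	/*
	 * Reserve a packet descriptor slot. The descriptor array is used as
	 * a ring: it is full when advancing packets_out would meet
	 * packets_in.
	 */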
	out = wq->packets_out + 1;
	if (out == OCE_WQ_PACKET_ARRAY_SIZE)
		out = 0;
	if (out == wq->packets_in)
		return EBUSY;

	pd = &wq->pckts[wq->packets_out];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc)) {
			/* A dummy WQE to keep the count even is required
			   only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}

		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;
		wq->packets_out = out;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = 1;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
		    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
		    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;
		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}
		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		wq->ring->num_used++;

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			wq->ring->num_used++;
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			wq->ring->num_used++;
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
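		/*
		 * Ring the TX doorbell: the count of WQEs just posted goes
		 * in the upper 16 bits, the WQ id in the lower 16.
		 */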
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}

static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	uint32_t in;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	if (wq->packets_out == wq->packets_in)
		device_printf(sc->dev, "WQ transmit descriptor missing\n");

	in = wq->packets_in + 1;
	if (in == OCE_WQ_PACKET_ARRAY_SIZE)
		in = 0;

	pd = &wq->pckts[wq->packets_in];
	wq->packets_in = in;
	wq->ring->num_used -= (pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}

static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
}

#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

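	/*
	 * Parse the Ethernet and IP headers to locate the TCP header, so
	 * that the whole header chain can be pulled up into a contiguous
	 * buffer below.
	 */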
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;
}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	if (TRY_LOCK(&wq->tx_lock)) {
		rc = oce_multiq_transmit(ifp, NULL, wq);
		if (rc) {
			device_printf(sc->dev,
			 "TX[%d] restart failed\n", wq->queue_index);
		}
		UNLOCK(&wq->tx_lock);
	}
#else
	oce_start(ifp);
#endif
}

void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}

/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	LOCK(&wq->tx_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

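		/*
		 * Move the WQ consumer index just past the completed WQE,
		 * wrapping around at the end of the ring.
		 */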
		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
	UNLOCK(&wq->tx_lock);

	return 0;
}

static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}



/*****************************************************************************
 *			    Receive routines functions			     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* Partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get the vlan_tag value */
	if (IS_BE(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

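	/*
	 * Chain the receive fragments into a single mbuf chain; the first
	 * fragment becomes the packet header, where the total length and
	 * checksum-offload results are recorded.
	 */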
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines whether the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
				/* In UMC mode the pvid is generally stripped
				   by hw, but in some cases we have seen it
				   arrive with the pvid intact. So if
				   pvid == vlan, neglect the vlan. */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    !(m->m_flags & M_VLANTAG) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}

static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/* Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags -= 1;
	}
	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}
}

static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp = cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;
}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && IS_BE(sc)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id = cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		; /* For BE3 legacy and Lancer this check is a no-op */

	return 1;
}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* INET6 || INET */

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;


	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
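	/*
	 * Publish the new buffers to the hardware. A single doorbell write
	 * can post at most OCE_MAX_RQ_POSTS buffers, so post full batches
	 * first and the remainder last.
	 */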
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			DELAY(1);
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			DELAY(1);
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}

/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_discard_rx_comp(rq, cqe);
			else
				/* Post L3/L4 errors to stack. */
				oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
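		/* Bound the work done per call; Lancer (XE201) is limited
		 * here to 8 completions per pass (assumed hardware
		 * restriction), others to the oce_max_rsp_handled tunable.
		 */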
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	UNLOCK(&rq->rx_lock);

	return 0;
}



/*****************************************************************************
 *			       Helper functions				     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	if_initbaudrate(sc->ifp, IF_Gbps(10));

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}

static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc != arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc != arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}

/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						NULL, 0, 1, 1);
	return status;
}

static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

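	/*
	 * Add the new MAC before deleting the old one, so the interface is
	 * never left without a programmed address.
	 */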
	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
}

static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;

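	/*
	 * The user buffer begins with an ASCII cookie, followed by a
	 * mailbox header and request payload that are copied into DMA-able
	 * memory and passed through to the controller.
	 */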
	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc = EFAULT;

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;
}

static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}

static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait a maximum of 400ms for TX completions to finish */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* We still need MCC async events, so re-enable interrupts and
	   arm the first EQ */
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}

static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}

	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);
}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}

	/* Update speed */
	sc->link_speed = acqe->u0.s.speed;
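	/* qos_link_speed appears to be reported in units of 10 Mbps
	 * (inferred from the scaling below), so multiply to store Mbps. */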
	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}

static void
setup_max_queues_want(POCE_SOFTC sc)
{
	int max_rss = 0;

	/* Check if this is a FLEX machine; if so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!sc->rss_enable)			  ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
		sc->rss_enable = 0;
	} else {
		/* For multiqueue, our design is to have the number of TX
		   rings equal the RSS rings, so that one RSS ring and one
		   TX ring can be paired on a single interrupt, which
		   improves CPU cache efficiency. */
		if (IS_BE(sc) && (!sc->be3_native))
			max_rss = OCE_LEGACY_MODE_RSS;
		else
			max_rss = OCE_MAX_RSS;

		sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
		sc->nwqs = MIN(OCE_NCPUS, max_rss);
	}
}

static void
update_queues_got(POCE_SOFTC sc)
{
	if (sc->rss_enable) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}