xref: /freebsd/sys/dev/oce/oce_if.c (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
1 /*-
2  * Copyright (C) 2013 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38 
39 /* $FreeBSD$ */
40 
41 #include "opt_inet6.h"
42 #include "opt_inet.h"
43 
44 #include "oce_if.h"
45 
46 /* UE Status Low CSR */
47 static char *ue_status_low_desc[] = {
48 	"CEV",
49 	"CTX",
50 	"DBUF",
51 	"ERX",
52 	"Host",
53 	"MPU",
54 	"NDMA",
55 	"PTC ",
56 	"RDMA ",
57 	"RXF ",
58 	"RXIPS ",
59 	"RXULP0 ",
60 	"RXULP1 ",
61 	"RXULP2 ",
62 	"TIM ",
63 	"TPOST ",
64 	"TPRE ",
65 	"TXIPS ",
66 	"TXULP0 ",
67 	"TXULP1 ",
68 	"UC ",
69 	"WDMA ",
70 	"TXULP2 ",
71 	"HOST1 ",
72 	"P0_OB_LINK ",
73 	"P1_OB_LINK ",
74 	"HOST_GPIO ",
75 	"MBOX ",
76 	"AXGMAC0",
77 	"AXGMAC1",
78 	"JTAG",
79 	"MPU_INTPEND"
80 };
81 
82 /* UE Status High CSR */
83 static char *ue_status_hi_desc[] = {
84 	"LPCMEMHOST",
85 	"MGMT_MAC",
86 	"PCS0ONLINE",
87 	"MPU_IRAM",
88 	"PCS1ONLINE",
89 	"PCTL0",
90 	"PCTL1",
91 	"PMEM",
92 	"RR",
93 	"TXPB",
94 	"RXPP",
95 	"XAUI",
96 	"TXP",
97 	"ARM",
98 	"IPC",
99 	"HOST2",
100 	"HOST3",
101 	"HOST4",
102 	"HOST5",
103 	"HOST6",
104 	"HOST7",
105 	"HOST8",
106 	"HOST9",
107 	"NETC",
108 	"Unknown",
109 	"Unknown",
110 	"Unknown",
111 	"Unknown",
112 	"Unknown",
113 	"Unknown",
114 	"Unknown",
115 	"Unknown"
116 };
117 
118 
119 /* Driver entry point prototypes */
120 static int  oce_probe(device_t dev);
121 static int  oce_attach(device_t dev);
122 static int  oce_detach(device_t dev);
123 static int  oce_shutdown(device_t dev);
124 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125 static void oce_init(void *xsc);
126 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127 static void oce_multiq_flush(struct ifnet *ifp);
128 
129 /* Driver interrupt routine prototypes */
130 static void oce_intr(void *arg, int pending);
131 static int  oce_setup_intr(POCE_SOFTC sc);
132 static int  oce_fast_isr(void *arg);
133 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
134 			  void (*isr) (void *arg, int pending));
135 
136 /* Media callback prototypes */
137 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138 static int  oce_media_change(struct ifnet *ifp);
139 
140 /* Transmit routine prototypes */
141 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 					uint32_t status);
145 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 				 struct oce_wq *wq);
147 
148 /* Receive routine prototypes */
149 static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152 static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 						struct oce_nic_rx_cqe *cqe);
154 
155 /* Helper function prototypes in this file */
156 static int  oce_attach_ifp(POCE_SOFTC sc);
157 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159 static int  oce_vid_config(POCE_SOFTC sc);
160 static void oce_mac_addr_set(POCE_SOFTC sc);
161 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162 static void oce_local_timer(void *arg);
163 static void oce_if_deactivate(POCE_SOFTC sc);
164 static void oce_if_activate(POCE_SOFTC sc);
165 static void setup_max_queues_want(POCE_SOFTC sc);
166 static void update_queues_got(POCE_SOFTC sc);
167 static void process_link_state(POCE_SOFTC sc,
168 		 struct oce_async_cqe_link_state *acqe);
169 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170 static void oce_get_config(POCE_SOFTC sc);
171 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172 
173 /* IP specific */
174 #if defined(INET6) || defined(INET)
175 static int  oce_init_lro(POCE_SOFTC sc);
176 static void oce_rx_flush_lro(struct oce_rq *rq);
177 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178 #endif
179 
180 static device_method_t oce_dispatch[] = {
181 	DEVMETHOD(device_probe, oce_probe),
182 	DEVMETHOD(device_attach, oce_attach),
183 	DEVMETHOD(device_detach, oce_detach),
184 	DEVMETHOD(device_shutdown, oce_shutdown),
185 
186 	DEVMETHOD_END
187 };
188 
189 static driver_t oce_driver = {
190 	"oce",
191 	oce_dispatch,
192 	sizeof(OCE_SOFTC)
193 };
194 static devclass_t oce_devclass;
195 
196 
197 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
198 MODULE_DEPEND(oce, pci, 1, 1, 1);
199 MODULE_DEPEND(oce, ether, 1, 1, 1);
200 MODULE_VERSION(oce, 1);
201 
202 
203 /* global vars */
204 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
205 
206 /* Module capabilities and parameters */
207 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
208 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
209 
210 
211 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
212 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
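/*
 * A minimal sketch of how these tunables would typically be overridden
 * from /boot/loader.conf at boot (the values below are illustrative,
 * not recommendations):
 *
 *	hw.oce.max_rsp_handled="64"
 *	hw.oce.enable_rss="0"
 */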
213 
214 
215 /* Supported devices table */
216 static uint32_t supportedDevices[] =  {
217 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
218 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
219 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
220 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
221 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
222 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
223 };
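/*
 * Each table entry packs the PCI vendor ID into the upper 16 bits and
 * the device ID into the lower 16 bits; oce_probe() below recovers
 * them with the obvious decode:
 *
 *	vendor = (supportedDevices[i] >> 16) & 0xffff;
 *	device = supportedDevices[i] & 0xffff;
 */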
224 
225 
226 
227 
228 /*****************************************************************************
229  *			Driver entry point functions                         *
230  *****************************************************************************/
231 
232 static int
233 oce_probe(device_t dev)
234 {
235 	uint16_t vendor = 0;
236 	uint16_t device = 0;
237 	int i = 0;
238 	char str[256] = {0};
239 	POCE_SOFTC sc;
240 
241 	sc = device_get_softc(dev);
242 	bzero(sc, sizeof(OCE_SOFTC));
243 	sc->dev = dev;
244 
245 	vendor = pci_get_vendor(dev);
246 	device = pci_get_device(dev);
247 
248 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
249 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
250 			if (device == (supportedDevices[i] & 0xffff)) {
251 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
252 					component_revision);
253 				device_set_desc_copy(dev, str);
254 
255 				switch (device) {
256 				case PCI_PRODUCT_BE2:
257 					sc->flags |= OCE_FLAGS_BE2;
258 					break;
259 				case PCI_PRODUCT_BE3:
260 					sc->flags |= OCE_FLAGS_BE3;
261 					break;
262 				case PCI_PRODUCT_XE201:
263 				case PCI_PRODUCT_XE201_VF:
264 					sc->flags |= OCE_FLAGS_XE201;
265 					break;
266 				case PCI_PRODUCT_SH:
267 					sc->flags |= OCE_FLAGS_SH;
268 					break;
269 				default:
270 					return ENXIO;
271 				}
272 				return BUS_PROBE_DEFAULT;
273 			}
274 		}
275 	}
276 
277 	return ENXIO;
278 }
279 
280 
281 static int
282 oce_attach(device_t dev)
283 {
284 	POCE_SOFTC sc;
285 	int rc = 0;
286 
287 	sc = device_get_softc(dev);
288 
289 	rc = oce_hw_pci_alloc(sc);
290 	if (rc)
291 		return rc;
292 
293 	sc->tx_ring_size = OCE_TX_RING_SIZE;
294 	sc->rx_ring_size = OCE_RX_RING_SIZE;
295 	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
296 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
297 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
298 
299 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
300 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
301 
302 	/* initialise the hardware */
303 	rc = oce_hw_init(sc);
304 	if (rc)
305 		goto pci_res_free;
306 
307 	oce_get_config(sc);
308 
309 	setup_max_queues_want(sc);
310 
311 	rc = oce_setup_intr(sc);
312 	if (rc)
313 		goto mbox_free;
314 
315 	rc = oce_queue_init_all(sc);
316 	if (rc)
317 		goto intr_free;
318 
319 	rc = oce_attach_ifp(sc);
320 	if (rc)
321 		goto queues_free;
322 
323 #if defined(INET6) || defined(INET)
324 	rc = oce_init_lro(sc);
325 	if (rc)
326 		goto ifp_free;
327 #endif
328 
329 	rc = oce_hw_start(sc);
330 	if (rc)
331 		goto lro_free;
332 
333 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
334 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
335 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
336 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
337 
338 	rc = oce_stats_init(sc);
339 	if (rc)
340 		goto vlan_free;
341 
342 	oce_add_sysctls(sc);
343 
344 	callout_init(&sc->timer, CALLOUT_MPSAFE);
345 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
346 	if (rc)
347 		goto stats_free;
348 
349 	return 0;
350 
351 stats_free:
352 	callout_drain(&sc->timer);
353 	oce_stats_free(sc);
354 vlan_free:
355 	if (sc->vlan_attach)
356 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
357 	if (sc->vlan_detach)
358 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
359 	oce_hw_intr_disable(sc);
360 lro_free:
361 #if defined(INET6) || defined(INET)
362 	oce_free_lro(sc);
363 ifp_free:
364 #endif
365 	ether_ifdetach(sc->ifp);
366 	if_free(sc->ifp);
367 queues_free:
368 	oce_queue_release_all(sc);
369 intr_free:
370 	oce_intr_free(sc);
371 mbox_free:
372 	oce_dma_free(sc, &sc->bsmbx);
373 pci_res_free:
374 	oce_hw_pci_free(sc);
375 	LOCK_DESTROY(&sc->dev_lock);
376 	LOCK_DESTROY(&sc->bmbx_lock);
377 	return rc;
378 
379 }
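/*
 * Note on the unwind chain above: each failure label releases only what
 * was acquired before the failing step, in reverse order of acquisition
 * (stats -> vlan handlers -> LRO -> ifp -> queues -> interrupts ->
 * mailbox -> PCI resources).  Any new allocation added to oce_attach()
 * needs a matching label at the corresponding point in the chain.
 */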
380 
381 
382 static int
383 oce_detach(device_t dev)
384 {
385 	POCE_SOFTC sc = device_get_softc(dev);
386 
387 	LOCK(&sc->dev_lock);
388 	oce_if_deactivate(sc);
389 	UNLOCK(&sc->dev_lock);
390 
391 	callout_drain(&sc->timer);
392 
393 	if (sc->vlan_attach != NULL)
394 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
395 	if (sc->vlan_detach != NULL)
396 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
397 
398 	ether_ifdetach(sc->ifp);
399 
400 	if_free(sc->ifp);
401 
402 	oce_hw_shutdown(sc);
403 
404 	bus_generic_detach(dev);
405 
406 	return 0;
407 }
408 
409 
410 static int
411 oce_shutdown(device_t dev)
412 {
413 	int rc;
414 
415 	rc = oce_detach(dev);
416 
417 	return rc;
418 }
419 
420 
421 static int
422 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
423 {
424 	struct ifreq *ifr = (struct ifreq *)data;
425 	POCE_SOFTC sc = ifp->if_softc;
426 	int rc = 0;
427 	uint32_t u;
428 
429 	switch (command) {
430 
431 	case SIOCGIFMEDIA:
432 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
433 		break;
434 
435 	case SIOCSIFMTU:
436 		if (ifr->ifr_mtu > OCE_MAX_MTU)
437 			rc = EINVAL;
438 		else
439 			ifp->if_mtu = ifr->ifr_mtu;
440 		break;
441 
442 	case SIOCSIFFLAGS:
443 		if (ifp->if_flags & IFF_UP) {
444 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
445 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
446 				oce_init(sc);
447 			}
448 			device_printf(sc->dev, "Interface Up\n");
449 		} else {
450 			LOCK(&sc->dev_lock);
451 
452 			sc->ifp->if_drv_flags &=
453 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
454 			oce_if_deactivate(sc);
455 
456 			UNLOCK(&sc->dev_lock);
457 
458 			device_printf(sc->dev, "Interface Down\n");
459 		}
460 
461 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
462 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
463 				sc->promisc = TRUE;
464 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
465 			if (!oce_rxf_set_promiscuous(sc, 0))
466 				sc->promisc = FALSE;
467 		}
468 
469 		break;
470 
471 	case SIOCADDMULTI:
472 	case SIOCDELMULTI:
473 		rc = oce_hw_update_multicast(sc);
474 		if (rc)
475 			device_printf(sc->dev,
476 				"Update multicast address failed\n");
477 		break;
478 
479 	case SIOCSIFCAP:
480 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
481 
482 		if (u & IFCAP_TXCSUM) {
483 			ifp->if_capenable ^= IFCAP_TXCSUM;
484 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
485 
486 			if (IFCAP_TSO & ifp->if_capenable &&
487 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
488 				ifp->if_capenable &= ~IFCAP_TSO;
489 				ifp->if_hwassist &= ~CSUM_TSO;
490 				if_printf(ifp,
491 					 "TSO disabled due to -txcsum.\n");
492 			}
493 		}
494 
495 		if (u & IFCAP_RXCSUM)
496 			ifp->if_capenable ^= IFCAP_RXCSUM;
497 
498 		if (u & IFCAP_TSO4) {
499 			ifp->if_capenable ^= IFCAP_TSO4;
500 
501 			if (IFCAP_TSO & ifp->if_capenable) {
502 				if (IFCAP_TXCSUM & ifp->if_capenable)
503 					ifp->if_hwassist |= CSUM_TSO;
504 				else {
505 					ifp->if_capenable &= ~IFCAP_TSO;
506 					ifp->if_hwassist &= ~CSUM_TSO;
507 					if_printf(ifp,
508 					    "Enable txcsum first.\n");
509 					rc = EAGAIN;
510 				}
511 			} else
512 				ifp->if_hwassist &= ~CSUM_TSO;
513 		}
514 
515 		if (u & IFCAP_VLAN_HWTAGGING)
516 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
517 
518 		if (u & IFCAP_VLAN_HWFILTER) {
519 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
520 			oce_vid_config(sc);
521 		}
522 #if defined(INET6) || defined(INET)
523 		if (u & IFCAP_LRO)
524 			ifp->if_capenable ^= IFCAP_LRO;
525 #endif
526 
527 		break;
528 
529 	case SIOCGPRIVATE_0:
530 		rc = oce_handle_passthrough(ifp, data);
531 		break;
532 	default:
533 		rc = ether_ioctl(ifp, command, data);
534 		break;
535 	}
536 
537 	return rc;
538 }
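/*
 * The SIOCSIFCAP handling above enforces one offload dependency: TSO
 * requires TX checksum offload.  Disabling txcsum therefore also clears
 * IFCAP_TSO and CSUM_TSO, and enabling tso4 while txcsum is off is
 * rejected with EAGAIN ("Enable txcsum first").
 */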
539 
540 
541 static void
542 oce_init(void *arg)
543 {
544 	POCE_SOFTC sc = arg;
545 
546 	LOCK(&sc->dev_lock);
547 
548 	if (sc->ifp->if_flags & IFF_UP) {
549 		oce_if_deactivate(sc);
550 		oce_if_activate(sc);
551 	}
552 
553 	UNLOCK(&sc->dev_lock);
554 
555 }
556 
557 
558 static int
559 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
560 {
561 	POCE_SOFTC sc = ifp->if_softc;
562 	struct oce_wq *wq = NULL;
563 	int queue_index = 0;
564 	int status = 0;
565 
566 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
567 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
568 
569 	wq = sc->wq[queue_index];
570 
571 	LOCK(&wq->tx_lock);
572 	status = oce_multiq_transmit(ifp, m, wq);
573 	UNLOCK(&wq->tx_lock);
574 
575 	return status;
576 
577 }
578 
579 
580 static void
581 oce_multiq_flush(struct ifnet *ifp)
582 {
583 	POCE_SOFTC sc = ifp->if_softc;
584 	struct mbuf     *m;
585 	int i = 0;
586 
587 	for (i = 0; i < sc->nwqs; i++) {
588 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
589 			m_freem(m);
590 	}
591 	if_qflush(ifp);
592 }
593 
594 
595 
596 /*****************************************************************************
597  *                        Driver interrupt routines                          *
598  *****************************************************************************/
599 
600 static void
601 oce_intr(void *arg, int pending)
602 {
603 
604 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
605 	POCE_SOFTC sc = ii->sc;
606 	struct oce_eq *eq = ii->eq;
607 	struct oce_eqe *eqe;
608 	struct oce_cq *cq = NULL;
609 	int i, num_eqes = 0;
610 
611 
612 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
613 				 BUS_DMASYNC_POSTWRITE);
614 	do {
615 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
616 		if (eqe->evnt == 0)
617 			break;
618 		eqe->evnt = 0;
619 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
620 					BUS_DMASYNC_POSTWRITE);
621 		RING_GET(eq->ring, 1);
622 		num_eqes++;
623 
624 	} while (TRUE);
625 
626 	if (!num_eqes)
627 		goto eq_arm; /* Spurious */
628 
629 	/* Clear EQ entries, but don't arm */
630 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
631 
632 	/* Process TX, RX and MCC, but don't arm the CQs yet */
633 	for (i = 0; i < eq->cq_valid; i++) {
634 		cq = eq->cq[i];
635 		(*cq->cq_handler)(cq->cb_arg);
636 	}
637 
638 	/* Arm all cqs connected to this EQ */
639 	for (i = 0; i < eq->cq_valid; i++) {
640 		cq = eq->cq[i];
641 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
642 	}
643 
644 eq_arm:
645 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
646 
647 	return;
648 }
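/*
 * EQ handling protocol above, as a sketch: consumed event entries are
 * first acked without re-arming, the CQs are then drained, and only
 * afterwards are the CQs and finally the EQ re-armed, so nothing posted
 * during the drain is lost:
 *
 *	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);	clear only
 *	(*cq->cq_handler)(cq->cb_arg);				drain CQs
 *	oce_arm_cq(sc, cq->cq_id, 0, TRUE);			re-arm CQs
 *	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);		re-arm EQ
 */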
649 
650 
651 static int
652 oce_setup_intr(POCE_SOFTC sc)
653 {
654 	int rc = 0, use_intx = 0;
655 	int vector = 0, req_vectors = 0;
656 
657 	if (is_rss_enabled(sc))
658 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
659 	else
660 		req_vectors = 1;
661 
662 	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
663 		sc->intr_count = req_vectors;
664 		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
665 		if (rc != 0) {
666 			use_intx = 1;
667 			pci_release_msi(sc->dev);
668 		} else
669 			sc->flags |= OCE_FLAGS_USING_MSIX;
670 	} else
671 		use_intx = 1;
672 
673 	if (use_intx)
674 		sc->intr_count = 1;
675 
676 	/* Scale number of queues based on intr we got */
677 	update_queues_got(sc);
678 
679 	if (use_intx) {
680 		device_printf(sc->dev, "Using legacy interrupt\n");
681 		rc = oce_alloc_intr(sc, vector, oce_intr);
682 		if (rc)
683 			goto error;
684 	} else {
685 		for (; vector < sc->intr_count; vector++) {
686 			rc = oce_alloc_intr(sc, vector, oce_intr);
687 			if (rc)
688 				goto error;
689 		}
690 	}
691 
692 	return 0;
693 error:
694 	oce_intr_free(sc);
695 	return rc;
696 }
697 
698 
699 static int
700 oce_fast_isr(void *arg)
701 {
702 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
703 	POCE_SOFTC sc = ii->sc;
704 
705 	if (ii->eq == NULL)
706 		return FILTER_STRAY;
707 
708 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
709 
710 	taskqueue_enqueue_fast(ii->tq, &ii->task);
711 
712  	ii->eq->intr++;
713 
714 	return FILTER_HANDLED;
715 }
716 
717 
718 static int
719 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
720 {
721 	POCE_INTR_INFO ii = &sc->intrs[vector];
722 	int rc = 0, rr;
723 
724 	if (vector >= OCE_MAX_EQ)
725 		return (EINVAL);
726 
727 	/* Set the resource id for the interrupt.
728 	 * MSIx is vector + 1 for the resource id,
729 	 * INTx is 0 for the resource id.
730 	 */
731 	if (sc->flags & OCE_FLAGS_USING_MSIX)
732 		rr = vector + 1;
733 	else
734 		rr = 0;
735 	ii->intr_res = bus_alloc_resource_any(sc->dev,
736 					      SYS_RES_IRQ,
737 					      &rr, RF_ACTIVE|RF_SHAREABLE);
738 	ii->irq_rr = rr;
739 	if (ii->intr_res == NULL) {
740 		device_printf(sc->dev,
741 			  "Could not allocate interrupt\n");
742 		rc = ENXIO;
743 		return rc;
744 	}
745 
746 	TASK_INIT(&ii->task, 0, isr, ii);
747 	ii->vector = vector;
748 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
749 	ii->tq = taskqueue_create_fast(ii->task_name,
750 			M_NOWAIT,
751 			taskqueue_thread_enqueue,
752 			&ii->tq);
753 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
754 			device_get_nameunit(sc->dev));
755 
756 	ii->sc = sc;
757 	rc = bus_setup_intr(sc->dev,
758 			ii->intr_res,
759 			INTR_TYPE_NET,
760 			oce_fast_isr, NULL, ii, &ii->tag);
761 	return rc;
762 
763 }
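/*
 * A note on the SYS_RES_IRQ rid used above: with MSI-X enabled, message
 * vectors are exposed as rids starting at 1 (hence "vector + 1"), while
 * rid 0 is the legacy INTx line used in the fallback case.
 */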
764 
765 
766 void
767 oce_intr_free(POCE_SOFTC sc)
768 {
769 	int i = 0;
770 
771 	for (i = 0; i < sc->intr_count; i++) {
772 
773 		if (sc->intrs[i].tag != NULL)
774 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
775 						sc->intrs[i].tag);
776 		if (sc->intrs[i].tq != NULL)
777 			taskqueue_free(sc->intrs[i].tq);
778 
779 		if (sc->intrs[i].intr_res != NULL)
780 			bus_release_resource(sc->dev, SYS_RES_IRQ,
781 						sc->intrs[i].irq_rr,
782 						sc->intrs[i].intr_res);
783 		sc->intrs[i].tag = NULL;
784 		sc->intrs[i].intr_res = NULL;
785 	}
786 
787 	if (sc->flags & OCE_FLAGS_USING_MSIX)
788 		pci_release_msi(sc->dev);
789 
790 }
791 
792 
793 
794 /******************************************************************************
795 *			   Media callback functions			      *
796 ******************************************************************************/
797 
798 static void
799 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
800 {
801 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
802 
803 
804 	req->ifm_status = IFM_AVALID;
805 	req->ifm_active = IFM_ETHER;
806 
807 	if (sc->link_status == 1)
808 		req->ifm_status |= IFM_ACTIVE;
809 	else
810 		return;
811 
812 	switch (sc->link_speed) {
813 	case 1: /* 10 Mbps */
814 		req->ifm_active |= IFM_10_T | IFM_FDX;
815 		sc->speed = 10;
816 		break;
817 	case 2: /* 100 Mbps */
818 		req->ifm_active |= IFM_100_TX | IFM_FDX;
819 		sc->speed = 100;
820 		break;
821 	case 3: /* 1 Gbps */
822 		req->ifm_active |= IFM_1000_T | IFM_FDX;
823 		sc->speed = 1000;
824 		break;
825 	case 4: /* 10 Gbps */
826 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
827 		sc->speed = 10000;
828 		break;
829 	case 5: /* 20 Gbps */
830 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
831 		sc->speed = 20000;
832 		break;
833 	case 6: /* 25 Gbps */
834 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
835 		sc->speed = 25000;
836 		break;
837 	case 7: /* 40 Gbps */
838 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
839 		sc->speed = 40000;
840 		break;
841 	default:
842 		sc->speed = 0;
843 		break;
844 	}
845 
846 	return;
847 }
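/*
 * The 20G and 25G cases above deliberately fall back to reporting
 * IFM_10G_SR, presumably because ifmedia had no dedicated 20/25 Gbps
 * media types when this was written; sc->speed still records the true
 * link speed for those cases.
 */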
848 
849 
850 int
851 oce_media_change(struct ifnet *ifp)
852 {
853 	return 0;
854 }
855 
856 
857 
858 
859 /*****************************************************************************
860  *			      Transmit routines				     *
861  *****************************************************************************/
862 
863 static int
864 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
865 {
866 	int rc = 0, i, retry_cnt = 0;
867 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
868 	struct mbuf *m, *m_temp;
869 	struct oce_wq *wq = sc->wq[wq_index];
870 	struct oce_packet_desc *pd;
871 	struct oce_nic_hdr_wqe *nichdr;
872 	struct oce_nic_frag_wqe *nicfrag;
873 	int num_wqes;
874 	uint32_t reg_value;
875 	boolean_t complete = TRUE;
876 
877 	m = *mpp;
878 	if (!m)
879 		return EINVAL;
880 
881 	if (!(m->m_flags & M_PKTHDR)) {
882 		rc = ENXIO;
883 		goto free_ret;
884 	}
885 
886 	if(oce_tx_asic_stall_verify(sc, m)) {
887 		m = oce_insert_vlan_tag(sc, m, &complete);
888 		if(!m) {
889 			device_printf(sc->dev, "VLAN tag insertion failed\n");
890 			return 0;
891 		}
892 
893 	}
894 
895 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
896 		/* consolidate packet buffers for TSO/LSO segment offload */
897 #if defined(INET6) || defined(INET)
898 		m = oce_tso_setup(sc, mpp);
899 #else
900 		m = NULL;
901 #endif
902 		if (m == NULL) {
903 			rc = ENXIO;
904 			goto free_ret;
905 		}
906 	}
907 
908 	pd = &wq->pckts[wq->pkt_desc_head];
909 retry:
910 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
911 				     pd->map,
912 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
913 	if (rc == 0) {
914 		num_wqes = pd->nsegs + 1;
915 		if (IS_BE(sc) || IS_SH(sc)) {
916 			/* Dummy WQE to keep the count even; required only for BE3 */
917 			if (num_wqes & 1)
918 				num_wqes++;
919 		}
920 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
921 			bus_dmamap_unload(wq->tag, pd->map);
922 			return EBUSY;
923 		}
924 		atomic_store_rel_int(&wq->pkt_desc_head,
925 				     (wq->pkt_desc_head + 1) % \
926 				      OCE_WQ_PACKET_ARRAY_SIZE);
927 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
928 		pd->mbuf = m;
929 
930 		nichdr =
931 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
932 		nichdr->u0.dw[0] = 0;
933 		nichdr->u0.dw[1] = 0;
934 		nichdr->u0.dw[2] = 0;
935 		nichdr->u0.dw[3] = 0;
936 
937 		nichdr->u0.s.complete = complete;
938 		nichdr->u0.s.event = 1;
939 		nichdr->u0.s.crc = 1;
940 		nichdr->u0.s.forward = 0;
941 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
942 		nichdr->u0.s.udpcs =
943 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
944 		nichdr->u0.s.tcpcs =
945 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
946 		nichdr->u0.s.num_wqe = num_wqes;
947 		nichdr->u0.s.total_length = m->m_pkthdr.len;
948 
949 		if (m->m_flags & M_VLANTAG) {
950 			nichdr->u0.s.vlan = 1; /* VLAN present */
951 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
952 		}
953 
954 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
955 			if (m->m_pkthdr.tso_segsz) {
956 				nichdr->u0.s.lso = 1;
957 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
958 			}
959 			if (!IS_BE(sc) || !IS_SH(sc))
960 				nichdr->u0.s.ipcs = 1;
961 		}
962 
963 		RING_PUT(wq->ring, 1);
964 		atomic_add_int(&wq->ring->num_used, 1);
965 
966 		for (i = 0; i < pd->nsegs; i++) {
967 			nicfrag =
968 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
969 						      struct oce_nic_frag_wqe);
970 			nicfrag->u0.s.rsvd0 = 0;
971 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
972 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
973 			nicfrag->u0.s.frag_len = segs[i].ds_len;
974 			pd->wqe_idx = wq->ring->pidx;
975 			RING_PUT(wq->ring, 1);
976 			atomic_add_int(&wq->ring->num_used, 1);
977 		}
978 		if (num_wqes > (pd->nsegs + 1)) {
979 			nicfrag =
980 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
981 						      struct oce_nic_frag_wqe);
982 			nicfrag->u0.dw[0] = 0;
983 			nicfrag->u0.dw[1] = 0;
984 			nicfrag->u0.dw[2] = 0;
985 			nicfrag->u0.dw[3] = 0;
986 			pd->wqe_idx = wq->ring->pidx;
987 			RING_PUT(wq->ring, 1);
988 			atomic_add_int(&wq->ring->num_used, 1);
989 			pd->nsegs++;
990 		}
991 
992 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
993 		wq->tx_stats.tx_reqs++;
994 		wq->tx_stats.tx_wrbs += num_wqes;
995 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
996 		wq->tx_stats.tx_pkts++;
997 
998 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
999 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1000 		reg_value = (num_wqes << 16) | wq->wq_id;
1001 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1002 
1003 	} else if (rc == EFBIG)	{
1004 		if (retry_cnt == 0) {
1005 			m_temp = m_defrag(m, M_NOWAIT);
1006 			if (m_temp == NULL)
1007 				goto free_ret;
1008 			m = m_temp;
1009 			*mpp = m_temp;
1010 			retry_cnt = retry_cnt + 1;
1011 			goto retry;
1012 		} else
1013 			goto free_ret;
1014 	} else if (rc == ENOMEM)
1015 		return rc;
1016 	else
1017 		goto free_ret;
1018 
1019 	return 0;
1020 
1021 free_ret:
1022 	m_freem(*mpp);
1023 	*mpp = NULL;
1024 	return rc;
1025 }
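/*
 * WQE layout produced above for an mbuf chain of N DMA segments, as a
 * sketch:
 *
 *	[nic_hdr_wqe][frag_wqe 0]...[frag_wqe N-1][zeroed dummy?]
 *
 * num_wqes is N + 1, plus one zeroed dummy fragment on BE/Skyhawk when
 * the count would otherwise be odd.  The doorbell write then packs the
 * WQE count and queue id into a single register:
 *
 *	reg_value = (num_wqes << 16) | wq->wq_id;
 */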
1026 
1027 
1028 static void
1029 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1030 {
1031 	struct oce_packet_desc *pd;
1032 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1033 	struct mbuf *m;
1034 
1035 	pd = &wq->pckts[wq->pkt_desc_tail];
1036 	atomic_store_rel_int(&wq->pkt_desc_tail,
1037 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1038 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1039 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1040 	bus_dmamap_unload(wq->tag, pd->map);
1041 
1042 	m = pd->mbuf;
1043 	m_freem(m);
1044 	pd->mbuf = NULL;
1045 
1046 
1047 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1048 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1049 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1050 			oce_tx_restart(sc, wq);
1051 		}
1052 	}
1053 }
1054 
1055 
1056 static void
1057 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1058 {
1059 
1060 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1061 		return;
1062 
1063 #if __FreeBSD_version >= 800000
1064 	if (!drbr_empty(sc->ifp, wq->br))
1065 #else
1066 	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1067 #endif
1068 		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1069 
1070 }
1071 
1072 
1073 #if defined(INET6) || defined(INET)
1074 static struct mbuf *
1075 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1076 {
1077 	struct mbuf *m;
1078 #ifdef INET
1079 	struct ip *ip;
1080 #endif
1081 #ifdef INET6
1082 	struct ip6_hdr *ip6;
1083 #endif
1084 	struct ether_vlan_header *eh;
1085 	struct tcphdr *th;
1086 	uint16_t etype;
1087 	int total_len = 0, ehdrlen = 0;
1088 
1089 	m = *mpp;
1090 
1091 	if (M_WRITABLE(m) == 0) {
1092 		m = m_dup(*mpp, M_NOWAIT);
1093 		if (!m)
1094 			return NULL;
1095 		m_freem(*mpp);
1096 		*mpp = m;
1097 	}
1098 
1099 	eh = mtod(m, struct ether_vlan_header *);
1100 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1101 		etype = ntohs(eh->evl_proto);
1102 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1103 	} else {
1104 		etype = ntohs(eh->evl_encap_proto);
1105 		ehdrlen = ETHER_HDR_LEN;
1106 	}
1107 
1108 	switch (etype) {
1109 #ifdef INET
1110 	case ETHERTYPE_IP:
1111 		ip = (struct ip *)(m->m_data + ehdrlen);
1112 		if (ip->ip_p != IPPROTO_TCP)
1113 			return NULL;
1114 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1115 
1116 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1117 		break;
1118 #endif
1119 #ifdef INET6
1120 	case ETHERTYPE_IPV6:
1121 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1122 		if (ip6->ip6_nxt != IPPROTO_TCP)
1123 			return NULL;
1124 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1125 
1126 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1127 		break;
1128 #endif
1129 	default:
1130 		return NULL;
1131 	}
1132 
1133 	m = m_pullup(m, total_len);
1134 	if (!m)
1135 		return NULL;
1136 	*mpp = m;
1137 	return m;
1138 
1139 }
1140 #endif /* INET6 || INET */
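/*
 * oce_tso_setup() pulls up just the headers the hardware must parse for
 * LSO: Ethernet (plus optional VLAN), IP or IPv6, and TCP.  For an
 * untagged IPv4 packet with no IP or TCP options, for example, that is
 *
 *	total_len = 14 + (5 << 2) + (5 << 2) = 54 bytes.
 */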
1141 
1142 void
1143 oce_tx_task(void *arg, int npending)
1144 {
1145 	struct oce_wq *wq = arg;
1146 	POCE_SOFTC sc = wq->parent;
1147 	struct ifnet *ifp = sc->ifp;
1148 	int rc = 0;
1149 
1150 #if __FreeBSD_version >= 800000
1151 	LOCK(&wq->tx_lock);
1152 	rc = oce_multiq_transmit(ifp, NULL, wq);
1153 	if (rc) {
1154 		device_printf(sc->dev,
1155 				"TX[%d] restart failed\n", wq->queue_index);
1156 	}
1157 	UNLOCK(&wq->tx_lock);
1158 #else
1159 	oce_start(ifp);
1160 #endif
1161 
1162 }
1163 
1164 
1165 void
1166 oce_start(struct ifnet *ifp)
1167 {
1168 	POCE_SOFTC sc = ifp->if_softc;
1169 	struct mbuf *m;
1170 	int rc = 0;
1171 	int def_q = 0; /* Default TX queue is 0 */
1172 
1173 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1174 			IFF_DRV_RUNNING)
1175 		return;
1176 
1177 	if (!sc->link_status)
1178 		return;
1179 
1180 	do {
1181 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1182 		if (m == NULL)
1183 			break;
1184 
1185 		LOCK(&sc->wq[def_q]->tx_lock);
1186 		rc = oce_tx(sc, &m, def_q);
1187 		UNLOCK(&sc->wq[def_q]->tx_lock);
1188 		if (rc) {
1189 			if (m != NULL) {
1190 				sc->wq[def_q]->tx_stats.tx_stops ++;
1191 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1192 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1193 				m = NULL;
1194 			}
1195 			break;
1196 		}
1197 		if (m != NULL)
1198 			ETHER_BPF_MTAP(ifp, m);
1199 
1200 	} while (TRUE);
1201 
1202 	return;
1203 }
1204 
1205 
1206 /* Handle the Completion Queue for transmit */
1207 uint16_t
1208 oce_wq_handler(void *arg)
1209 {
1210 	struct oce_wq *wq = (struct oce_wq *)arg;
1211 	POCE_SOFTC sc = wq->parent;
1212 	struct oce_cq *cq = wq->cq;
1213 	struct oce_nic_tx_cqe *cqe;
1214 	int num_cqes = 0;
1215 
1216 	bus_dmamap_sync(cq->ring->dma.tag,
1217 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1218 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1219 	while (cqe->u0.dw[3]) {
1220 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1221 
1222 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1223 		if (wq->ring->cidx >= wq->ring->num_items)
1224 			wq->ring->cidx -= wq->ring->num_items;
1225 
1226 		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1227 		wq->tx_stats.tx_compl++;
1228 		cqe->u0.dw[3] = 0;
1229 		RING_GET(cq->ring, 1);
1230 		bus_dmamap_sync(cq->ring->dma.tag,
1231 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1232 		cqe =
1233 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1234 		num_cqes++;
1235 	}
1236 
1237 	if (num_cqes)
1238 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1239 
1240 	return 0;
1241 }
1242 
1243 
1244 static int
1245 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1246 {
1247 	POCE_SOFTC sc = ifp->if_softc;
1248 	int status = 0, queue_index = 0;
1249 	struct mbuf *next = NULL;
1250 	struct buf_ring *br = NULL;
1251 
1252 	br  = wq->br;
1253 	queue_index = wq->queue_index;
1254 
1255 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1256 		IFF_DRV_RUNNING) {
1257 		if (m != NULL)
1258 			status = drbr_enqueue(ifp, br, m);
1259 		return status;
1260 	}
1261 
1262 	if (m != NULL) {
1263 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1264 			return status;
1265 	}
1266 	while ((next = drbr_peek(ifp, br)) != NULL) {
1267 		if (oce_tx(sc, &next, queue_index)) {
1268 			if (next == NULL) {
1269 				drbr_advance(ifp, br);
1270 			} else {
1271 				drbr_putback(ifp, br, next);
1272 				wq->tx_stats.tx_stops ++;
1273 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1274 			}
1275 			break;
1276 		}
1277 		drbr_advance(ifp, br);
1278 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1279 		if (next->m_flags & M_MCAST)
1280 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1281 		ETHER_BPF_MTAP(ifp, next);
1282 	}
1283 
1284 	return 0;
1285 }
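/*
 * The drbr_peek()/drbr_advance()/drbr_putback() idiom above keeps the
 * buf_ring consistent across partial failures: when oce_tx() fails and
 * has consumed (freed) the mbuf it is NULL, so the ring slot is simply
 * advanced past; when the mbuf survives (e.g. the WQ ring was full) it
 * is put back and the interface is marked IFF_DRV_OACTIVE for a later
 * restart.
 */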
1286 
1287 
1288 
1289 
1290 /*****************************************************************************
1291  *			       Receive routines				     *
1292  *****************************************************************************/
1293 
1294 static void
1295 oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1296 {
1297 	uint32_t out;
1298 	struct oce_packet_desc *pd;
1299 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1300 	int i, len, frag_len;
1301 	struct mbuf *m = NULL, *tail = NULL;
1302 	uint16_t vtag;
1303 
1304 	len = cqe->u0.s.pkt_size;
1305 	if (!len) {
1306 		/* Partial DMA workaround for Lancer */
1307 		oce_discard_rx_comp(rq, cqe);
1308 		goto exit;
1309 	}
1310 
1311 	/* Get the vlan_tag value */
1312 	if(IS_BE(sc) || IS_SH(sc))
1313 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1314 	else
1315 		vtag = cqe->u0.s.vlan_tag;
1316 
1317 
1318 	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1319 
1320 		if (rq->packets_out == rq->packets_in) {
1321 			device_printf(sc->dev,
1322 				  "RQ receive descriptor missing\n");
1323 		}
1324 		out = rq->packets_out + 1;
1325 		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1326 			out = 0;
1327 		pd = &rq->pckts[rq->packets_out];
1328 		rq->packets_out = out;
1329 
1330 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1331 		bus_dmamap_unload(rq->tag, pd->map);
1332 		rq->pending--;
1333 
1334 		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1335 		pd->mbuf->m_len = frag_len;
1336 
1337 		if (tail != NULL) {
1338 			/* additional fragments */
1339 			pd->mbuf->m_flags &= ~M_PKTHDR;
1340 			tail->m_next = pd->mbuf;
1341 			tail = pd->mbuf;
1342 		} else {
1343 			/* first fragment, fill out much of the packet header */
1344 			pd->mbuf->m_pkthdr.len = len;
1345 			pd->mbuf->m_pkthdr.csum_flags = 0;
1346 			if (IF_CSUM_ENABLED(sc)) {
1347 				if (cqe->u0.s.l4_cksum_pass) {
1348 					pd->mbuf->m_pkthdr.csum_flags |=
1349 					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1350 					pd->mbuf->m_pkthdr.csum_data = 0xffff;
1351 				}
1352 				if (cqe->u0.s.ip_cksum_pass) {
1353 					if (!cqe->u0.s.ip_ver) { /* IPV4 */
1354 						pd->mbuf->m_pkthdr.csum_flags |=
1355 						(CSUM_IP_CHECKED|CSUM_IP_VALID);
1356 					}
1357 				}
1358 			}
1359 			m = tail = pd->mbuf;
1360 		}
1361 		pd->mbuf = NULL;
1362 		len -= frag_len;
1363 	}
1364 
1365 	if (m) {
1366 		if (!oce_cqe_portid_valid(sc, cqe)) {
1367 			 m_freem(m);
1368 			 goto exit;
1369 		}
1370 
1371 		m->m_pkthdr.rcvif = sc->ifp;
1372 #if __FreeBSD_version >= 800000
1373 		if (rq->queue_index)
1374 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1375 		else
1376 			m->m_pkthdr.flowid = rq->queue_index;
1377 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1378 #endif
1379 		/* This determines whether the VLAN tag is valid */
1380 		if (oce_cqe_vtp_valid(sc, cqe)) {
1381 			if (sc->function_mode & FNM_FLEX10_MODE) {
1382 				/* FLEX10. If QnQ is not set, neglect VLAN */
1383 				if (cqe->u0.s.qnq) {
1384 					m->m_pkthdr.ether_vtag = vtag;
1385 					m->m_flags |= M_VLANTAG;
1386 				}
1387 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1388 				/* In UMC mode the pvid is generally stripped by
1389 				   hw, but in some cases frames arrive still
1390 				   carrying it. So if pvid == vlan, ignore vlan.
1391 				*/
1392 				m->m_pkthdr.ether_vtag = vtag;
1393 				m->m_flags |= M_VLANTAG;
1394 			}
1395 		}
1396 
1397 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1398 #if defined(INET6) || defined(INET)
1399 		/* Try to queue to LRO */
1400 		if (IF_LRO_ENABLED(sc) &&
1401 		    (cqe->u0.s.ip_cksum_pass) &&
1402 		    (cqe->u0.s.l4_cksum_pass) &&
1403 		    (!cqe->u0.s.ip_ver)       &&
1404 		    (rq->lro.lro_cnt != 0)) {
1405 
1406 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1407 				rq->lro_pkts_queued ++;
1408 				goto post_done;
1409 			}
1410 			/* If LRO posting fails then try to post to STACK */
1411 		}
1412 #endif
1413 
1414 		(*sc->ifp->if_input) (sc->ifp, m);
1415 #if defined(INET6) || defined(INET)
1416 post_done:
1417 #endif
1418 		/* Update rx stats per queue */
1419 		rq->rx_stats.rx_pkts++;
1420 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1421 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1422 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1423 			rq->rx_stats.rx_mcast_pkts++;
1424 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1425 			rq->rx_stats.rx_ucast_pkts++;
1426 	}
1427 exit:
1428 	return;
1429 }
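/*
 * Receive reassembly above, as a sketch: the hardware scatters one
 * frame across num_fragments RQ buffers of at most rq->cfg.frag_size
 * bytes each.  The first fragment keeps M_PKTHDR and carries the total
 * length and checksum flags; the rest are linked via m_next with
 * M_PKTHDR cleared.  E.g. a 4000-byte frame with 2048-byte fragments
 * becomes a two-mbuf chain holding 2048 + 1952 bytes.
 */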
1430 
1431 
1432 static void
1433 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1434 {
1435 	uint32_t out, i = 0;
1436 	struct oce_packet_desc *pd;
1437 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1438 	int num_frags = cqe->u0.s.num_fragments;
1439 
1440 	for (i = 0; i < num_frags; i++) {
1441 		if (rq->packets_out == rq->packets_in) {
1442 			device_printf(sc->dev,
1443 				"RQ receive descriptor missing\n");
1444 		}
1445 		out = rq->packets_out + 1;
1446 		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1447 			out = 0;
1448 		pd = &rq->pckts[rq->packets_out];
1449 		rq->packets_out = out;
1450 
1451 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1452 		bus_dmamap_unload(rq->tag, pd->map);
1453 		rq->pending--;
1454 		m_freem(pd->mbuf);
1455 	}
1456 
1457 }
1458 
1459 
1460 static int
1461 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1462 {
1463 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1464 	int vtp = 0;
1465 
1466 	if (sc->be3_native) {
1467 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1468 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1469 	} else
1470 		vtp = cqe->u0.s.vlan_tag_present;
1471 
1472 	return vtp;
1473 
1474 }
1475 
1476 
1477 static int
1478 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1479 {
1480 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1481 	int port_id = 0;
1482 
1483 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1484 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1485 		port_id =  cqe_v1->u0.s.port;
1486 		if (sc->port_id != port_id)
1487 			return 0;
1488 	} else
1489 		;/* For legacy BE3 and Lancer this check is a no-op */
1490 
1491 	return 1;
1492 
1493 }
1494 
1495 #if defined(INET6) || defined(INET)
1496 static void
1497 oce_rx_flush_lro(struct oce_rq *rq)
1498 {
1499 	struct lro_ctrl	*lro = &rq->lro;
1500 	struct lro_entry *queued;
1501 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1502 
1503 	if (!IF_LRO_ENABLED(sc))
1504 		return;
1505 
1506 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1507 		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1508 		tcp_lro_flush(lro, queued);
1509 	}
1510 	rq->lro_pkts_queued = 0;
1511 
1512 	return;
1513 }
1514 
1515 
1516 static int
1517 oce_init_lro(POCE_SOFTC sc)
1518 {
1519 	struct lro_ctrl *lro = NULL;
1520 	int i = 0, rc = 0;
1521 
1522 	for (i = 0; i < sc->nrqs; i++) {
1523 		lro = &sc->rq[i]->lro;
1524 		rc = tcp_lro_init(lro);
1525 		if (rc != 0) {
1526 			device_printf(sc->dev, "LRO init failed\n");
1527 			return rc;
1528 		}
1529 		lro->ifp = sc->ifp;
1530 	}
1531 
1532 	return rc;
1533 }
1534 
1535 
1536 void
1537 oce_free_lro(POCE_SOFTC sc)
1538 {
1539 	struct lro_ctrl *lro = NULL;
1540 	int i = 0;
1541 
1542 	for (i = 0; i < sc->nrqs; i++) {
1543 		lro = &sc->rq[i]->lro;
1544 		if (lro)
1545 			tcp_lro_free(lro);
1546 	}
1547 }
1548 #endif
1549 
1550 int
1551 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1552 {
1553 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1554 	int i, in, rc;
1555 	struct oce_packet_desc *pd;
1556 	bus_dma_segment_t segs[6];
1557 	int nsegs, added = 0;
1558 	struct oce_nic_rqe *rqe;
1559 	pd_rxulp_db_t rxdb_reg;
1560 
1561 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1562 	for (i = 0; i < count; i++) {
1563 		in = rq->packets_in + 1;
1564 		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1565 			in = 0;
1566 		if (in == rq->packets_out)
1567 			break;	/* no more room */
1568 
1569 		pd = &rq->pckts[rq->packets_in];
1570 		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1571 		if (pd->mbuf == NULL)
1572 			break;
1573 
1574 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1575 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1576 					     pd->map,
1577 					     pd->mbuf,
1578 					     segs, &nsegs, BUS_DMA_NOWAIT);
1579 		if (rc) {
1580 			m_free(pd->mbuf);
1581 			break;
1582 		}
1583 
1584 		if (nsegs != 1) {
1585 			i--;
1586 			continue;
1587 		}
1588 
1589 		rq->packets_in = in;
1590 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1591 
1592 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1593 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1594 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1595 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1596 		RING_PUT(rq->ring, 1);
1597 		added++;
1598 		rq->pending++;
1599 	}
1600 	if (added != 0) {
1601 		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1602 			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1603 			rxdb_reg.bits.qid = rq->rq_id;
1604 			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1605 			added -= OCE_MAX_RQ_POSTS;
1606 		}
1607 		if (added > 0) {
1608 			rxdb_reg.bits.qid = rq->rq_id;
1609 			rxdb_reg.bits.num_posted = added;
1610 			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1611 		}
1612 	}
1613 
1614 	return 0;
1615 }
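/*
 * The doorbell writes above are chunked because the num_posted field
 * can only express up to OCE_MAX_RQ_POSTS buffers per write.  Assuming
 * the usual value of 255 for OCE_MAX_RQ_POSTS, posting 600 buffers
 * would issue two writes of 255 followed by a final write of 90.
 */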
1616 
1617 
1618 /* Handle the Completion Queue for receive */
1619 uint16_t
1620 oce_rq_handler(void *arg)
1621 {
1622 	struct oce_rq *rq = (struct oce_rq *)arg;
1623 	struct oce_cq *cq = rq->cq;
1624 	POCE_SOFTC sc = rq->parent;
1625 	struct oce_nic_rx_cqe *cqe;
1626 	int num_cqes = 0, rq_buffers_used = 0;
1627 
1628 
1629 	bus_dmamap_sync(cq->ring->dma.tag,
1630 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1631 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1632 	while (cqe->u0.dw[2]) {
1633 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1634 
1635 		RING_GET(rq->ring, 1);
1636 		if (cqe->u0.s.error == 0) {
1637 			oce_rx(rq, cqe->u0.s.frag_index, cqe);
1638 		} else {
1639 			rq->rx_stats.rxcp_err++;
1640 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1641 			/* Post L3/L4 errors to the stack. */
1642 			oce_rx(rq, cqe->u0.s.frag_index, cqe);
1643 		}
1644 		rq->rx_stats.rx_compl++;
1645 		cqe->u0.dw[2] = 0;
1646 
1647 #if defined(INET6) || defined(INET)
1648 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1649 			oce_rx_flush_lro(rq);
1650 		}
1651 #endif
1652 
1653 		RING_GET(cq->ring, 1);
1654 		bus_dmamap_sync(cq->ring->dma.tag,
1655 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1656 		cqe =
1657 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1658 		num_cqes++;
1659 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1660 			break;
1661 	}
1662 
1663 #if defined(INET6) || defined(INET)
1664 	if (IF_LRO_ENABLED(sc))
1665 		oce_rx_flush_lro(rq);
1666 #endif
1667 
1668 	if (num_cqes) {
1669 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1670 		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1671 		if (rq_buffers_used > 1)
1672 			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1673 	}
1674 
1675 	return 0;
1676 
1677 }
1678 
1679 
1680 
1681 
1682 /*****************************************************************************
1683  *			 Helper functions in this file			     *
1684  *****************************************************************************/
1685 
1686 static int
1687 oce_attach_ifp(POCE_SOFTC sc)
1688 {
1689 
1690 	sc->ifp = if_alloc(IFT_ETHER);
1691 	if (!sc->ifp)
1692 		return ENOMEM;
1693 
1694 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1695 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1696 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1697 
1698 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1699 	sc->ifp->if_ioctl = oce_ioctl;
1700 	sc->ifp->if_start = oce_start;
1701 	sc->ifp->if_init = oce_init;
1702 	sc->ifp->if_mtu = ETHERMTU;
1703 	sc->ifp->if_softc = sc;
1704 #if __FreeBSD_version >= 800000
1705 	sc->ifp->if_transmit = oce_multiq_start;
1706 	sc->ifp->if_qflush = oce_multiq_flush;
1707 #endif
1708 
1709 	if_initname(sc->ifp,
1710 		    device_get_name(sc->dev), device_get_unit(sc->dev));
1711 
1712 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1713 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1714 	IFQ_SET_READY(&sc->ifp->if_snd);
1715 
1716 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1717 	sc->ifp->if_hwassist |= CSUM_TSO;
1718 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1719 
1720 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1721 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1722 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1723 
1724 #if defined(INET6) || defined(INET)
1725 	sc->ifp->if_capabilities |= IFCAP_TSO;
1726 	sc->ifp->if_capabilities |= IFCAP_LRO;
1727 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1728 #endif
1729 
1730 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
1731 	sc->ifp->if_baudrate = IF_Gbps(10);
1732 
1733 #if __FreeBSD_version >= 1000000
1734 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1735 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
1736 	sc->ifp->if_hw_tsomaxsegsize = 4096;
1737 #endif
1738 
1739 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1740 
1741 	return 0;
1742 }
1743 
1744 
1745 static void
1746 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1747 {
1748 	POCE_SOFTC sc = ifp->if_softc;
1749 
1750 	if (ifp->if_softc !=  arg)
1751 		return;
1752 	if ((vtag == 0) || (vtag > 4095))
1753 		return;
1754 
1755 	sc->vlan_tag[vtag] = 1;
1756 	sc->vlans_added++;
1757 	if (sc->vlans_added <= (sc->max_vlans + 1))
1758 		oce_vid_config(sc);
1759 }
1760 
1761 
1762 static void
1763 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1764 {
1765 	POCE_SOFTC sc = ifp->if_softc;
1766 
1767 	if (ifp->if_softc !=  arg)
1768 		return;
1769 	if ((vtag == 0) || (vtag > 4095))
1770 		return;
1771 
1772 	sc->vlan_tag[vtag] = 0;
1773 	sc->vlans_added--;
1774 	oce_vid_config(sc);
1775 }
1776 
1777 
1778 /*
1779  * A max of 64 vlans can be configured in BE. If the user configures
1780  * more, place the card in vlan promiscuous mode.
1781  */
1782 static int
1783 oce_vid_config(POCE_SOFTC sc)
1784 {
1785 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1786 	uint16_t ntags = 0, i;
1787 	int status = 0;
1788 
1789 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1790 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1791 		for (i = 0; i < MAX_VLANS; i++) {
1792 			if (sc->vlan_tag[i]) {
1793 				vtags[ntags].vtag = i;
1794 				ntags++;
1795 			}
1796 		}
1797 		if (ntags)
1798 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1799 						vtags, ntags, 1, 0);
1800 	} else
1801 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1802 					 	NULL, 0, 1, 1);
1803 	return status;
1804 }
1805 
1806 
1807 static void
1808 oce_mac_addr_set(POCE_SOFTC sc)
1809 {
1810 	uint32_t old_pmac_id = sc->pmac_id;
1811 	int status = 0;
1812 
1813 
1814 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1815 			 sc->macaddr.size_of_struct);
1816 	if (!status)
1817 		return;
1818 
1819 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1820 					sc->if_id, &sc->pmac_id);
1821 	if (!status) {
1822 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1823 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1824 				 sc->macaddr.size_of_struct);
1825 	}
1826 	if (status)
1827 		device_printf(sc->dev, "Failed to update MAC address\n");
1828 
1829 }
1830 
1831 
1832 static int
1833 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1834 {
1835 	POCE_SOFTC sc = ifp->if_softc;
1836 	struct ifreq *ifr = (struct ifreq *)data;
1837 	int rc = ENXIO;
1838 	char cookie[32] = {0};
1839 	void *priv_data = (void *)ifr->ifr_data;
1840 	void *ioctl_ptr;
1841 	uint32_t req_size;
1842 	struct mbx_hdr req;
1843 	OCE_DMA_MEM dma_mem;
1844 	struct mbx_common_get_cntl_attr *fw_cmd;
1845 
1846 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1847 		return EFAULT;
1848 
1849 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1850 		return EINVAL;
1851 
1852 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1853 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1854 		return EFAULT;
1855 
1856 	req_size = le32toh(req.u0.req.request_length);
1857 	if (req_size > 65536)
1858 		return EINVAL;
1859 
1860 	req_size += sizeof(struct mbx_hdr);
1861 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1862 	if (rc)
1863 		return ENOMEM;
1864 
1865 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1866 		rc = EFAULT;
1867 		goto dma_free;
1868 	}
1869 
1870 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1871 	if (rc) {
1872 		rc = EIO;
1873 		goto dma_free;
1874 	}
1875 
1876 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1877 		rc =  EFAULT;
1878 
1879 	/*
1880 	   The firmware fills in all the attributes for this ioctl except
1881 	   the driver version, so fill that in here.
1882 	 */
1883 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1884 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1885 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1886 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1887 	}
1888 
1889 dma_free:
1890 	oce_dma_free(sc, &dma_mem);
1891 	return rc;
1892 
1893 }
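/*
 * Layout of the SIOCGPRIVATE_0 request handled above, as read off the
 * copyin sequence: ifr_data points at the IOCTL_COOKIE string, followed
 * immediately by a struct mbx_hdr and its payload.  request_length is
 * bounded to 64KB before a DMA buffer of request_length +
 * sizeof(struct mbx_hdr) bytes is allocated for the pass-through.
 */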
1894 
1895 static void
1896 oce_eqd_set_periodic(POCE_SOFTC sc)
1897 {
1898 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1899 	struct oce_aic_obj *aic;
1900 	struct oce_eq *eqo;
1901 	uint64_t now = 0, delta;
1902 	int eqd, i, num = 0;
1903 	uint32_t ips = 0;
1904 	int tps;
1905 
1906 	for (i = 0 ; i < sc->neqs; i++) {
1907 		eqo = sc->eq[i];
1908 		aic = &sc->aic_obj[i];
1909 		/* Use the static EQ delay configured from user space */
1910 		if (!aic->enable) {
1911 			eqd = aic->et_eqd;
1912 			goto modify_eqd;
1913 		}
1914 
1915 		now = ticks;
1916 
1917 		/* Overflow check */
1918 		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1919 			goto done;
1920 
1921 		delta = now - aic->ticks;
1922 		tps = delta/hz;
1923 
1924 		/* Interrupt rate based on elapsed ticks */
1925 		if(tps)
1926 			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1927 
1928 		if (ips > INTR_RATE_HWM)
1929 			eqd = aic->cur_eqd + 20;
1930 		else if (ips < INTR_RATE_LWM)
1931 			eqd = aic->cur_eqd / 2;
1932 		else
1933 			goto done;
1934 
1935 		if (eqd < 10)
1936 			eqd = 0;
1937 
1938 		/* Make sure that the eq delay is in the known range */
1939 		eqd = min(eqd, aic->max_eqd);
1940 		eqd = max(eqd, aic->min_eqd);
1941 
1942 modify_eqd:
1943 		if (eqd != aic->cur_eqd) {
1944 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
1945 			set_eqd[num].eq_id = eqo->eq_id;
1946 			aic->cur_eqd = eqd;
1947 			num++;
1948 		}
1949 done:
1950 		aic->intr_prev = eqo->intr;
1951 		aic->ticks = now;
1952 	}
1953 
1954 	/* Is there at least one EQ that needs to be modified? */
1955 	if(num)
1956 		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1957 }
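/*
 * A worked example of the adaptive loop above, with illustrative
 * numbers: if two seconds of ticks elapsed (tps == 2) and the EQ took
 * 80000 interrupts in that window, then ips == 40000.  If that exceeds
 * INTR_RATE_HWM the delay grows by 20; the value finally programmed is
 * delay_multiplier = (eqd * 65) / 100.
 */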
1958 
1959 static void oce_detect_hw_error(POCE_SOFTC sc)
1960 {
1961 
1962 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1963 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1964 	uint32_t i;
1965 
1966 	if (sc->hw_error)
1967 		return;
1968 
1969 	if (IS_XE201(sc)) {
1970 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1971 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1972 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1973 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1974 		}
1975 	} else {
1976 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1977 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1978 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1979 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1980 
1981 		ue_low = (ue_low & ~ue_low_mask);
1982 		ue_high = (ue_high & ~ue_high_mask);
1983 	}
1984 
1985 	/* On certain platforms BE hardware can indicate spurious UEs.
1986 	 * In case of a real UE the h/w stops working completely on its own,
1987 	 * so hw_error is deliberately not set on UE detection alone.
1988 	 */
1989 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1990 		sc->hw_error = TRUE;
1991 		device_printf(sc->dev, "Error detected in the card\n");
1992 	}
1993 
1994 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1995 		device_printf(sc->dev,
1996 				"ERR: sliport status 0x%x\n", sliport_status);
1997 		device_printf(sc->dev,
1998 				"ERR: sliport error1 0x%x\n", sliport_err1);
1999 		device_printf(sc->dev,
2000 				"ERR: sliport error2 0x%x\n", sliport_err2);
2001 	}
2002 
2003 	if (ue_low) {
2004 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2005 			if (ue_low & 1)
2006 				device_printf(sc->dev, "UE: %s bit set\n",
2007 							ue_status_low_desc[i]);
2008 		}
2009 	}
2010 
2011 	if (ue_high) {
2012 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2013 			if (ue_high & 1)
2014 				device_printf(sc->dev, "UE: %s bit set\n",
2015 							ue_status_hi_desc[i]);
2016 		}
2017 	}
2018 
2019 }
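/*
 * The shift loops above walk the unmasked UE bits from LSB to MSB,
 * printing the matching name from ue_status_low_desc[] or
 * ue_status_hi_desc[].  For example, ue_low == 0x5 would report bit 0
 * ("CEV") and bit 2 ("DBUF").
 */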
2020 
2021 
2022 static void
2023 oce_local_timer(void *arg)
2024 {
2025 	POCE_SOFTC sc = arg;
2026 	int i = 0;
2027 
2028 	oce_detect_hw_error(sc);
2029 	oce_refresh_nic_stats(sc);
2030 	oce_refresh_queue_stats(sc);
2031 	oce_mac_addr_set(sc);
2032 
2033 	/* TX watchdog */
2034 	for (i = 0; i < sc->nwqs; i++)
2035 		oce_tx_restart(sc, sc->wq[i]);
2036 
2037 	/* calculate and set the eq delay for optimal interrupt rate */
2038 	if (IS_BE(sc) || IS_SH(sc))
2039 		oce_eqd_set_periodic(sc);
2040 
2041 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2042 }
2043 
2044 
2045 /* NOTE: This should only be called while holding
2046  *        DEVICE_LOCK.
2047  */
2048 static void
2049 oce_if_deactivate(POCE_SOFTC sc)
2050 {
2051 	int i, mtime = 0;
2052 	int wait_req = 0;
2053 	struct oce_rq *rq;
2054 	struct oce_wq *wq;
2055 	struct oce_eq *eq;
2056 
2057 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2058 
2059 	/* Wait a maximum of 400ms for TX completions to finish */
2060 	while (mtime < 400) {
2061 		wait_req = 0;
2062 		for_all_wq_queues(sc, wq, i) {
2063 			if (wq->ring->num_used) {
2064 				wait_req = 1;
2065 				DELAY(1);
2066 				break;
2067 			}
2068 		}
2069 		mtime += 1;
2070 		if (!wait_req)
2071 			break;
2072 	}
2073 
2074 	/* Stop intrs and finish any bottom halves pending */
2075 	oce_hw_intr_disable(sc);
2076 
2077 	/* Since taskqueue_drain takes the Giant lock, we should not hold
2078 	   any other lock. So unlock the device lock and reacquire it after
2079 	   taskqueue_drain completes.
2080 	*/
2081 	UNLOCK(&sc->dev_lock);
2082 	for (i = 0; i < sc->intr_count; i++) {
2083 		if (sc->intrs[i].tq != NULL) {
2084 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2085 		}
2086 	}
2087 	LOCK(&sc->dev_lock);
2088 
2089 	/* Delete the RX queues in the card, with the flush param set */
2090 	oce_stop_rx(sc);
2091 
2092 	/* Invalidate any pending CQ and EQ entries */
2093 	for_all_evnt_queues(sc, eq, i)
2094 		oce_drain_eq(eq);
2095 	for_all_rq_queues(sc, rq, i)
2096 		oce_drain_rq_cq(rq);
2097 	for_all_wq_queues(sc, wq, i)
2098 		oce_drain_wq_cq(wq);
2099 
2100 	/* We still need to receive MCC async events,
2101 	   so re-enable interrupts and also arm the first EQ.
2102 	*/
2103 	oce_hw_intr_enable(sc);
2104 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2105 
2106 	DELAY(10);
2107 }
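
/*
 * The TX drain above is a bounded poll: check for pending work, back
 * off briefly, and give up after a fixed budget so a wedged queue can
 * never hang the deactivate path.  Generic sketch of the pattern;
 * pending_work() is a hypothetical predicate and the 1ms DELAY() is an
 * assumption (the loop above uses DELAY(1)).
 */
#if 0	/* illustrative sketch, not compiled */
	int polls = 0;

	while (polls < 400) {		/* hard upper bound on waiting */
		if (!pending_work())
			break;		/* drained early, stop polling */
		DELAY(1000);		/* busy-wait roughly 1ms */
		polls++;
	}
#endif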
2108 
2109 
2110 static void
2111 oce_if_activate(POCE_SOFTC sc)
2112 {
2113 	struct oce_eq *eq;
2114 	struct oce_rq *rq;
2115 	struct oce_wq *wq;
2116 	int i, rc = 0;
2117 
2118 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2119 
2120 	oce_hw_intr_disable(sc);
2121 
2122 	oce_start_rx(sc);
2123 
2124 	for_all_rq_queues(sc, rq, i) {
2125 		rc = oce_start_rq(rq);
2126 		if (rc)
2127 			device_printf(sc->dev, "Unable to start RX\n");
2128 	}
2129 
2130 	for_all_wq_queues(sc, wq, i) {
2131 		rc = oce_start_wq(wq);
2132 		if (rc)
2133 			device_printf(sc->dev, "Unable to start TX\n");
2134 	}
2135 
2136 
2137 	for_all_evnt_queues(sc, eq, i)
2138 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2139 
2140 	oce_hw_intr_enable(sc);
2141 
2142 }
2143 
2144 static void
2145 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2146 {
2147 	/* Update Link status */
2148 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2149 	     ASYNC_EVENT_LINK_UP) {
2150 		sc->link_status = ASYNC_EVENT_LINK_UP;
2151 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2152 	} else {
2153 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2154 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2155 	}
2156 }
2157 
2158 
2159 /* Handle the Completion Queue for the Mailbox/Async notifications */
2160 uint16_t
2161 oce_mq_handler(void *arg)
2162 {
2163 	struct oce_mq *mq = (struct oce_mq *)arg;
2164 	POCE_SOFTC sc = mq->parent;
2165 	struct oce_cq *cq = mq->cq;
2166 	int num_cqes = 0, evt_type = 0, optype = 0;
2167 	struct oce_mq_cqe *cqe;
2168 	struct oce_async_cqe_link_state *acqe;
2169 	struct oce_async_event_grp5_pvid_state *gcqe;
2170 	struct oce_async_event_qnq *dbgcqe;
2171 
2172 
2173 	bus_dmamap_sync(cq->ring->dma.tag,
2174 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2175 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2176 
2177 	while (cqe->u0.dw[3]) {
2178 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2179 		if (cqe->u0.s.async_event) {
2180 			evt_type = cqe->u0.s.event_type;
2181 			optype = cqe->u0.s.async_type;
2182 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2183 				/* Link status evt */
2184 				acqe = (struct oce_async_cqe_link_state *)cqe;
2185 				process_link_state(sc, acqe);
2186 			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
2187 				   (optype == ASYNC_EVENT_PVID_STATE)) {
2188 				/* GRP5 PVID */
2189 				gcqe =
2190 				(struct oce_async_event_grp5_pvid_state *)cqe;
2191 				if (gcqe->enabled)
2192 					sc->pvid = gcqe->tag & VLAN_VID_MASK;
2193 				else
2194 					sc->pvid = 0;
2195 
2196 			}
2197 			else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2198 				optype == ASYNC_EVENT_DEBUG_QNQ) {
2199 				dbgcqe =
2200 				(struct oce_async_event_qnq *)cqe;
2201 				if(dbgcqe->valid)
2202 					sc->qnqid = dbgcqe->vlan_tag;
2203 				sc->qnq_debug_event = TRUE;
2204 			}
2205 		}
2206 		cqe->u0.dw[3] = 0;
2207 		RING_GET(cq->ring, 1);
2208 		bus_dmamap_sync(cq->ring->dma.tag,
2209 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2210 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2211 		num_cqes++;
2212 	}
2213 
2214 	if (num_cqes)
2215 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2216 
2217 	return 0;
2218 }
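
/*
 * oce_mq_handler() above is the standard valid-bit completion-queue
 * walk: an entry is live while its last dword is non-zero; once
 * handled, that dword is cleared so the slot reads empty on the next
 * pass, the consumer index advances, and the CQ is finally re-armed
 * with the number of entries consumed.  Skeleton of the loop, with
 * process_cqe() as a hypothetical stand-in for the event dispatch:
 */
#if 0	/* illustrative sketch, not compiled */
	int num_cqes = 0;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
	while (cqe->u0.dw[3]) {			/* valid while dword 3 != 0 */
		process_cqe(cqe);
		cqe->u0.dw[3] = 0;		/* mark the slot consumed */
		RING_GET(cq->ring, 1);		/* advance consumer index */
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}
	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); /* return credits */
#endif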
2219 
2220 
2221 static void
2222 setup_max_queues_want(POCE_SOFTC sc)
2223 {
2224 	/* Check if it is a FLEX10/UMC/VNIC function. If so, don't use RSS */
2225 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2226 	    (sc->function_mode & FNM_UMC_MODE)    ||
2227 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2228 	    (!is_rss_enabled(sc))		  ||
2229 	    IS_BE2(sc)) {
2230 		sc->nrqs = 1;
2231 		sc->nwqs = 1;
2232 	} else {
2233 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2234 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2235 	}
2236 
2237 	if (IS_BE2(sc) && is_rss_enabled(sc))
2238 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2239 }
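
/*
 * Worked example of the sizing above, assuming 8 CPUs and hardware that
 * reports 4 RSS queues (nrssqs = 4):
 *
 *	nrqs = MIN(8, 4) + 1 = 5	(4 RSS RQs + 1 default RQ)
 *	nwqs = MIN(8, 4)     = 4
 *
 * In any of the FLEX10/UMC/VNIC modes, or with RSS disabled, both
 * collapse to a single queue.
 */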
2240 
2241 
2242 static void
2243 update_queues_got(POCE_SOFTC sc)
2244 {
2245 	if (is_rss_enabled(sc)) {
2246 		sc->nrqs = sc->intr_count + 1;
2247 		sc->nwqs = sc->intr_count;
2248 	} else {
2249 		sc->nrqs = 1;
2250 		sc->nwqs = 1;
2251 	}
2252 
2253 	if (IS_BE2(sc))
2254 		sc->nwqs = 1;
2255 }
2256 
2257 static int
2258 oce_check_ipv6_ext_hdr(struct mbuf *m)
2259 {
2260 	struct ether_header *eh = mtod(m, struct ether_header *);
2261 	caddr_t m_datatemp = m->m_data;
2262 
2263 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2264 		m->m_data += sizeof(struct ether_header);
2265 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2266 
2267 		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
2268 				(ip6->ip6_nxt != IPPROTO_UDP)) {
2269 			struct ip6_ext *ip6e = NULL;
2270 			m->m_data += sizeof(struct ip6_hdr);
2271 
2272 			ip6e = mtod(m, struct ip6_ext *);
2273 			if (ip6e->ip6e_len == 0xff) {
2274 				m->m_data = m_datatemp;
2275 				return TRUE;
2276 			}
2277 		}
2278 		m->m_data = m_datatemp;
2279 	}
2280 	return FALSE;
2281 }
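
/*
 * oce_check_ipv6_ext_hdr() above peeks into the frame by temporarily
 * advancing m_data and restoring it before every return, so callers
 * still see the untouched frame.  Sketch of that peek pattern, assuming
 * (as the driver does) that the headers are contiguous in the first
 * mbuf:
 */
#if 0	/* illustrative sketch, not compiled */
	caddr_t saved = m->m_data;
	struct ip6_hdr *ip6;

	m->m_data += sizeof(struct ether_header);	/* step past Ethernet */
	ip6 = mtod(m, struct ip6_hdr *);
	/* ... inspect ip6->ip6_nxt for TCP/UDP vs. extension headers ... */
	m->m_data = saved;				/* always restore */
#endif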
2282 
2283 static int
2284 is_be3_a1(POCE_SOFTC sc)
2285 {
2286 	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2287 		return TRUE;
2288 	}
2289 	return FALSE;
2290 }
2291 
2292 static struct mbuf *
2293 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2294 {
2295 	uint16_t vlan_tag = 0;
2296 
2297 	if(!M_WRITABLE(m))
2298 		return NULL;
2299 
2300 	/* Extract an out-of-band VLAN tag so it can be embedded in the frame */
2301 	if(m->m_flags & M_VLANTAG) {
2302 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2303 		m->m_flags &= ~M_VLANTAG;
2304 	}
2305 
2306 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2307 	if(sc->pvid) {
2308 		if(!vlan_tag)
2309 			vlan_tag = sc->pvid;
2310 		*complete = FALSE;
2311 	}
2312 
2313 	if(vlan_tag) {
2314 		m = ether_vlanencap(m, vlan_tag);
2315 	}
2316 
2317 	if(sc->qnqid) {
2318 		m = ether_vlanencap(m, sc->qnqid);
2319 		*complete = FALSE;
2320 	}
2321 	return m;
2322 }
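
/*
 * ether_vlanencap() rewrites the frame with an 802.1Q header carrying
 * the given tag and returns the new chain, or NULL when the prepend
 * fails (the original chain is freed in that case).  A defensive call
 * therefore checks the result before reusing the mbuf, e.g.:
 */
#if 0	/* illustrative sketch, not compiled */
	m = ether_vlanencap(m, vlan_tag);
	if (m == NULL)
		return NULL;	/* chain already freed, nothing to clean up */
#endif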
2323 
2324 static int
2325 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2326 {
2327 	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
2328 			oce_check_ipv6_ext_hdr(m)) {
2329 		return TRUE;
2330 	}
2331 	return FALSE;
2332 }
2333 
2334 static void
2335 oce_get_config(POCE_SOFTC sc)
2336 {
2337 	int rc = 0;
2338 	uint32_t max_rss = 0;
2339 
2340 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2341 		max_rss = OCE_LEGACY_MODE_RSS;
2342 	else
2343 		max_rss = OCE_MAX_RSS;
2344 
2345 	if (!IS_BE(sc)) {
2346 		rc = oce_get_profile_config(sc, max_rss);
2347 		if (rc) {
2348 			sc->nwqs = OCE_MAX_WQ;
2349 			sc->nrssqs = max_rss;
2350 			sc->nrqs = sc->nrssqs + 1;
2351 		}
2352 	}
2353 	else { /* For BE3, don't rely on the FW to determine the resources */
2354 		sc->nrssqs = max_rss;
2355 		sc->nrqs = sc->nrssqs + 1;
2356 		sc->nwqs = OCE_MAX_WQ;
2357 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2358 	}
2359 }
2360