xref: /freebsd/sys/dev/oce/oce_if.c (revision 2726a7014867ad7224d09b66836c5d385f0350f4)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
49 #define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
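
/*
 * Each entry above names the hardware block behind the corresponding bit
 * of the UE (unrecoverable error) status CSRs: bit 0 of the low word maps
 * to "CEV", bit 1 to "CTX", and so on; the high word is indexed the same
 * way, starting at "LPCMEMHOST".
 */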
122 
123 struct oce_common_cqe_info{
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 
136 /* Driver entry points prototypes */
137 static int  oce_probe(device_t dev);
138 static int  oce_attach(device_t dev);
139 static int  oce_detach(device_t dev);
140 static int  oce_shutdown(device_t dev);
141 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
142 static void oce_init(void *xsc);
143 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
144 static void oce_multiq_flush(struct ifnet *ifp);
145 
146 /* Driver interrupt routines prototypes */
147 static void oce_intr(void *arg, int pending);
148 static int  oce_setup_intr(POCE_SOFTC sc);
149 static int  oce_fast_isr(void *arg);
150 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
151 			  void (*isr) (void *arg, int pending));
152 
153 /* Media callbacks prototypes */
154 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
155 static int  oce_media_change(struct ifnet *ifp);
156 
157 /* Transmit routines prototypes */
158 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
159 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
160 static void oce_process_tx_completion(struct oce_wq *wq);
161 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
162 				 struct oce_wq *wq);
163 
164 /* Receive routines prototypes */
165 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
167 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
168 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static uint16_t oce_rq_handler_lro(void *arg);
170 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
172 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
173 
174 /* Helper function prototypes in this file */
175 static int  oce_attach_ifp(POCE_SOFTC sc);
176 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
178 static int  oce_vid_config(POCE_SOFTC sc);
179 static void oce_mac_addr_set(POCE_SOFTC sc);
180 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
181 static void oce_local_timer(void *arg);
182 static void oce_if_deactivate(POCE_SOFTC sc);
183 static void oce_if_activate(POCE_SOFTC sc);
184 static void setup_max_queues_want(POCE_SOFTC sc);
185 static void update_queues_got(POCE_SOFTC sc);
186 static void process_link_state(POCE_SOFTC sc,
187 		 struct oce_async_cqe_link_state *acqe);
188 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
189 static void oce_get_config(POCE_SOFTC sc);
190 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
191 static void oce_read_env_variables(POCE_SOFTC sc);
192 
193 
194 /* IP specific */
195 #if defined(INET6) || defined(INET)
196 static int  oce_init_lro(POCE_SOFTC sc);
197 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
198 #endif
199 
200 static device_method_t oce_dispatch[] = {
201 	DEVMETHOD(device_probe, oce_probe),
202 	DEVMETHOD(device_attach, oce_attach),
203 	DEVMETHOD(device_detach, oce_detach),
204 	DEVMETHOD(device_shutdown, oce_shutdown),
205 
206 	DEVMETHOD_END
207 };
208 
209 static driver_t oce_driver = {
210 	"oce",
211 	oce_dispatch,
212 	sizeof(OCE_SOFTC)
213 };
214 static devclass_t oce_devclass;
215 
216 
217 /* global vars */
218 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
219 
220 /* Module capabilities and parameters */
221 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
222 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
223 uint32_t oce_rq_buf_size = 2048;
224 
225 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
226 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
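
/*
 * These TUNABLE_INTs are read once at module load, so the usual way to
 * override them is from loader.conf(5) before the module loads; a sketch
 * (values purely illustrative):
 *
 *	hw.oce.max_rsp_handled="64"
 *	hw.oce.enable_rss="0"
 */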
227 
228 
229 /* Supported devices table */
230 static uint32_t supportedDevices[] =  {
231 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
232 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
233 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
234 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
235 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
236 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
237 };
238 
239 
240 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
241 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
242     nitems(supportedDevices));
243 MODULE_DEPEND(oce, pci, 1, 1, 1);
244 MODULE_DEPEND(oce, ether, 1, 1, 1);
245 MODULE_VERSION(oce, 1);
246 
247 
248 POCE_SOFTC softc_head = NULL;
249 POCE_SOFTC softc_tail = NULL;
250 
251 struct oce_rdma_if *oce_rdma_if = NULL;
252 
253 /*****************************************************************************
254  *			Driver entry points functions                        *
255  *****************************************************************************/
256 
257 static int
258 oce_probe(device_t dev)
259 {
260 	uint16_t vendor = 0;
261 	uint16_t device = 0;
262 	int i = 0;
263 	char str[256] = {0};
264 	POCE_SOFTC sc;
265 
266 	sc = device_get_softc(dev);
267 	bzero(sc, sizeof(OCE_SOFTC));
268 	sc->dev = dev;
269 
270 	vendor = pci_get_vendor(dev);
271 	device = pci_get_device(dev);
272 
273 	for (i = 0; i < nitems(supportedDevices); i++) {
274 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
275 			if (device == (supportedDevices[i] & 0xffff)) {
276 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
277 					component_revision);
278 				device_set_desc_copy(dev, str);
279 
280 				switch (device) {
281 				case PCI_PRODUCT_BE2:
282 					sc->flags |= OCE_FLAGS_BE2;
283 					break;
284 				case PCI_PRODUCT_BE3:
285 					sc->flags |= OCE_FLAGS_BE3;
286 					break;
287 				case PCI_PRODUCT_XE201:
288 				case PCI_PRODUCT_XE201_VF:
289 					sc->flags |= OCE_FLAGS_XE201;
290 					break;
291 				case PCI_PRODUCT_SH:
292 					sc->flags |= OCE_FLAGS_SH;
293 					break;
294 				default:
295 					return ENXIO;
296 				}
297 				return BUS_PROBE_DEFAULT;
298 			}
299 		}
300 	}
301 
302 	return ENXIO;
303 }
304 
305 
306 static int
307 oce_attach(device_t dev)
308 {
309 	POCE_SOFTC sc;
310 	int rc = 0;
311 
312 	sc = device_get_softc(dev);
313 
314 	rc = oce_hw_pci_alloc(sc);
315 	if (rc)
316 		return rc;
317 
318 	sc->tx_ring_size = OCE_TX_RING_SIZE;
319 	sc->rx_ring_size = OCE_RX_RING_SIZE;
320 	/* receive fragment size should be a multiple of 2K */
321 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
322 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
323 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
324 
325 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
326 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
327 
328 	/* initialise the hardware */
329 	rc = oce_hw_init(sc);
330 	if (rc)
331 		goto pci_res_free;
332 
333 	oce_read_env_variables(sc);
334 
335 	oce_get_config(sc);
336 
337 	setup_max_queues_want(sc);
338 
339 	rc = oce_setup_intr(sc);
340 	if (rc)
341 		goto mbox_free;
342 
343 	rc = oce_queue_init_all(sc);
344 	if (rc)
345 		goto intr_free;
346 
347 	rc = oce_attach_ifp(sc);
348 	if (rc)
349 		goto queues_free;
350 
351 #if defined(INET6) || defined(INET)
352 	rc = oce_init_lro(sc);
353 	if (rc)
354 		goto ifp_free;
355 #endif
356 
357 	rc = oce_hw_start(sc);
358 	if (rc)
359 		goto lro_free;
360 
361 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
362 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
363 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
364 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
365 
366 	rc = oce_stats_init(sc);
367 	if (rc)
368 		goto vlan_free;
369 
370 	oce_add_sysctls(sc);
371 
372 	callout_init(&sc->timer, CALLOUT_MPSAFE);
373 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
374 	if (rc)
375 		goto stats_free;
376 
377 	sc->next = NULL;
378 	if (softc_tail != NULL) {
379 		softc_tail->next = sc;
380 	} else {
381 		softc_head = sc;
382 	}
383 	softc_tail = sc;
384 
385 	return 0;
386 
387 stats_free:
388 	callout_drain(&sc->timer);
389 	oce_stats_free(sc);
390 vlan_free:
391 	if (sc->vlan_attach)
392 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
393 	if (sc->vlan_detach)
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
395 	oce_hw_intr_disable(sc);
396 lro_free:
397 #if defined(INET6) || defined(INET)
398 	oce_free_lro(sc);
399 ifp_free:
400 #endif
401 	ether_ifdetach(sc->ifp);
402 	if_free(sc->ifp);
403 queues_free:
404 	oce_queue_release_all(sc);
405 intr_free:
406 	oce_intr_free(sc);
407 mbox_free:
408 	oce_dma_free(sc, &sc->bsmbx);
409 pci_res_free:
410 	oce_hw_pci_free(sc);
411 	LOCK_DESTROY(&sc->dev_lock);
412 	LOCK_DESTROY(&sc->bmbx_lock);
413 	return rc;
414 
415 }
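
/*
 * The error labels above unwind oce_attach() in strict reverse order of
 * acquisition, so a failure at any step releases exactly what was set up
 * before it; e.g. an oce_stats_init() failure lands on vlan_free and
 * walks every label down through pci_res_free.
 */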
416 
417 
418 static int
419 oce_detach(device_t dev)
420 {
421 	POCE_SOFTC sc = device_get_softc(dev);
422 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
423 
424 	poce_sc_tmp = softc_head;
425 	ppoce_sc_tmp1 = &softc_head;
426 	while (poce_sc_tmp != NULL) {
427 		if (poce_sc_tmp == sc) {
428 			*ppoce_sc_tmp1 = sc->next;
429 			if (sc->next == NULL) {
430 				softc_tail = poce_sc_tmp2;
431 			}
432 			break;
433 		}
434 		poce_sc_tmp2 = poce_sc_tmp;
435 		ppoce_sc_tmp1 = &poce_sc_tmp->next;
436 		poce_sc_tmp = poce_sc_tmp->next;
437 	}
438 
439 	LOCK(&sc->dev_lock);
440 	oce_if_deactivate(sc);
441 	UNLOCK(&sc->dev_lock);
442 
443 	callout_drain(&sc->timer);
444 
445 	if (sc->vlan_attach != NULL)
446 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
447 	if (sc->vlan_detach != NULL)
448 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
449 
450 	ether_ifdetach(sc->ifp);
451 
452 	if_free(sc->ifp);
453 
454 	oce_hw_shutdown(sc);
455 
456 	bus_generic_detach(dev);
457 
458 	return 0;
459 }
460 
461 
462 static int
463 oce_shutdown(device_t dev)
464 {
465 	int rc;
466 
467 	rc = oce_detach(dev);
468 
469 	return rc;
470 }
471 
472 
473 static int
474 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
475 {
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	POCE_SOFTC sc = ifp->if_softc;
478 	struct ifi2creq i2c;
479 	uint8_t	offset = 0;
480 	int rc = 0;
481 	uint32_t u;
482 
483 	switch (command) {
484 
485 	case SIOCGIFMEDIA:
486 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
487 		break;
488 
489 	case SIOCSIFMTU:
490 		if (ifr->ifr_mtu > OCE_MAX_MTU)
491 			rc = EINVAL;
492 		else
493 			ifp->if_mtu = ifr->ifr_mtu;
494 		break;
495 
496 	case SIOCSIFFLAGS:
497 		if (ifp->if_flags & IFF_UP) {
498 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
499 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
500 				oce_init(sc);
501 			}
502 			device_printf(sc->dev, "Interface Up\n");
503 		} else {
504 			LOCK(&sc->dev_lock);
505 
506 			sc->ifp->if_drv_flags &=
507 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
508 			oce_if_deactivate(sc);
509 
510 			UNLOCK(&sc->dev_lock);
511 
512 			device_printf(sc->dev, "Interface Down\n");
513 		}
514 
515 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
516 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
517 				sc->promisc = TRUE;
518 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
519 			if (!oce_rxf_set_promiscuous(sc, 0))
520 				sc->promisc = FALSE;
521 		}
522 
523 		break;
524 
525 	case SIOCADDMULTI:
526 	case SIOCDELMULTI:
527 		rc = oce_hw_update_multicast(sc);
528 		if (rc)
529 			device_printf(sc->dev,
530 				"Update multicast address failed\n");
531 		break;
532 
533 	case SIOCSIFCAP:
534 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
535 
536 		if (u & IFCAP_TXCSUM) {
537 			ifp->if_capenable ^= IFCAP_TXCSUM;
538 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
539 
540 			if (IFCAP_TSO & ifp->if_capenable &&
541 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
542 				u &= ~IFCAP_TSO;
543 				ifp->if_capenable &= ~IFCAP_TSO;
544 				ifp->if_hwassist &= ~CSUM_TSO;
545 				if_printf(ifp,
546 					 "TSO disabled due to -txcsum.\n");
547 			}
548 		}
549 
550 		if (u & IFCAP_RXCSUM)
551 			ifp->if_capenable ^= IFCAP_RXCSUM;
552 
553 		if (u & IFCAP_TSO4) {
554 			ifp->if_capenable ^= IFCAP_TSO4;
555 
556 			if (IFCAP_TSO & ifp->if_capenable) {
557 				if (IFCAP_TXCSUM & ifp->if_capenable)
558 					ifp->if_hwassist |= CSUM_TSO;
559 				else {
560 					ifp->if_capenable &= ~IFCAP_TSO;
561 					ifp->if_hwassist &= ~CSUM_TSO;
562 					if_printf(ifp,
563 					    "Enable txcsum first.\n");
564 					rc = EAGAIN;
565 				}
566 			} else
567 				ifp->if_hwassist &= ~CSUM_TSO;
568 		}
569 
570 		if (u & IFCAP_VLAN_HWTAGGING)
571 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
572 
573 		if (u & IFCAP_VLAN_HWFILTER) {
574 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
575 			oce_vid_config(sc);
576 		}
577 #if defined(INET6) || defined(INET)
578 		if (u & IFCAP_LRO) {
579 			ifp->if_capenable ^= IFCAP_LRO;
580 			if(sc->enable_hwlro) {
581 				if(ifp->if_capenable & IFCAP_LRO) {
582 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
583 				}else {
584 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
585 				}
586 			}
587 		}
588 #endif
589 
590 		break;
591 
592 	case SIOCGI2C:
593 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
594 		if (rc)
595 			break;
596 
597 		if (i2c.dev_addr == PAGE_NUM_A0) {
598 			offset = i2c.offset;
599 		} else if (i2c.dev_addr == PAGE_NUM_A2) {
600 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
601 		} else {
602 			rc = EINVAL;
603 			break;
604 		}
605 
606 		if (i2c.len > sizeof(i2c.data) ||
607 		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
608 			rc = EINVAL;
609 			break;
610 		}
611 
612 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
613 		if (rc) {
614 			rc = -rc;
615 			break;
616 		}
617 
618 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
619 
620 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
621 		break;
622 
623 	case SIOCGPRIVATE_0:
624 		rc = priv_check(curthread, PRIV_DRIVER);
625 		if (rc != 0)
626 			break;
627 		rc = oce_handle_passthrough(ifp, data);
628 		break;
629 	default:
630 		rc = ether_ioctl(ifp, command, data);
631 		break;
632 	}
633 
634 	return rc;
635 }
636 
637 
638 static void
639 oce_init(void *arg)
640 {
641 	POCE_SOFTC sc = arg;
642 
643 	LOCK(&sc->dev_lock);
644 
645 	if (sc->ifp->if_flags & IFF_UP) {
646 		oce_if_deactivate(sc);
647 		oce_if_activate(sc);
648 	}
649 
650 	UNLOCK(&sc->dev_lock);
651 
652 }
653 
654 
655 static int
656 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
657 {
658 	POCE_SOFTC sc = ifp->if_softc;
659 	struct oce_wq *wq = NULL;
660 	int queue_index = 0;
661 	int status = 0;
662 
663 	if (!sc->link_status)
664 		return ENXIO;
665 
666 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
667 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
668 
669 	wq = sc->wq[queue_index];
670 
671 	LOCK(&wq->tx_lock);
672 	status = oce_multiq_transmit(ifp, m, wq);
673 	UNLOCK(&wq->tx_lock);
674 
675 	return status;
676 
677 }
678 
679 
680 static void
681 oce_multiq_flush(struct ifnet *ifp)
682 {
683 	POCE_SOFTC sc = ifp->if_softc;
684 	struct mbuf     *m;
685 	int i = 0;
686 
687 	for (i = 0; i < sc->nwqs; i++) {
688 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
689 			m_freem(m);
690 	}
691 	if_qflush(ifp);
692 }
693 
694 
695 
696 /*****************************************************************************
697  *                   Driver interrupt routines functions                     *
698  *****************************************************************************/
699 
700 static void
701 oce_intr(void *arg, int pending)
702 {
703 
704 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
705 	POCE_SOFTC sc = ii->sc;
706 	struct oce_eq *eq = ii->eq;
707 	struct oce_eqe *eqe;
708 	struct oce_cq *cq = NULL;
709 	int i, num_eqes = 0;
710 
711 
712 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
713 				 BUS_DMASYNC_POSTWRITE);
714 	do {
715 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
716 		if (eqe->evnt == 0)
717 			break;
718 		eqe->evnt = 0;
719 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
720 					BUS_DMASYNC_POSTWRITE);
721 		RING_GET(eq->ring, 1);
722 		num_eqes++;
723 
724 	} while (TRUE);
725 
726 	if (!num_eqes)
727 		goto eq_arm; /* Spurious */
728 
729 	/* Clear EQ entries, but don't arm */
730 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
731 
732 	/* Process TX, RX and MCC completions, but don't arm the CQs */
733 	for (i = 0; i < eq->cq_valid; i++) {
734 		cq = eq->cq[i];
735 		(*cq->cq_handler)(cq->cb_arg);
736 	}
737 
738 	/* Arm all cqs connected to this EQ */
739 	for (i = 0; i < eq->cq_valid; i++) {
740 		cq = eq->cq[i];
741 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
742 	}
743 
744 eq_arm:
745 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
746 
747 	return;
748 }
749 
750 
751 static int
752 oce_setup_intr(POCE_SOFTC sc)
753 {
754 	int rc = 0, use_intx = 0;
755 	int vector = 0, req_vectors = 0;
756 	int tot_req_vectors, tot_vectors;
757 
758 	if (is_rss_enabled(sc))
759 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
760 	else
761 		req_vectors = 1;
762 
763 	tot_req_vectors = req_vectors;
764 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
765 		if (req_vectors > 1) {
766 			tot_req_vectors += OCE_RDMA_VECTORS;
767 			sc->roce_intr_count = OCE_RDMA_VECTORS;
768 		}
769 	}
770 
771 	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
772 		sc->intr_count = req_vectors;
773 		tot_vectors = tot_req_vectors;
774 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
775 		if (rc != 0) {
776 			use_intx = 1;
777 			pci_release_msi(sc->dev);
778 		} else {
779 			if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
780 				if (tot_vectors < tot_req_vectors) {
781 					if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
782 						sc->roce_intr_count = (tot_vectors / 2);
783 					}
784 					sc->intr_count = tot_vectors - sc->roce_intr_count;
785 				}
786 			} else {
787 				sc->intr_count = tot_vectors;
788 			}
789 			sc->flags |= OCE_FLAGS_USING_MSIX;
790 		}
791 	} else
792 		use_intx = 1;
793 
794 	if (use_intx)
795 		sc->intr_count = 1;
796 
797 	/* Scale number of queues based on intr we got */
798 	update_queues_got(sc);
799 
800 	if (use_intx) {
801 		device_printf(sc->dev, "Using legacy interrupt\n");
802 		rc = oce_alloc_intr(sc, vector, oce_intr);
803 		if (rc)
804 			goto error;
805 	} else {
806 		for (; vector < sc->intr_count; vector++) {
807 			rc = oce_alloc_intr(sc, vector, oce_intr);
808 			if (rc)
809 				goto error;
810 		}
811 	}
812 
813 	return 0;
814 error:
815 	oce_intr_free(sc);
816 	return rc;
817 }
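
/*
 * Worked example of the vector budgeting above (illustrative, assuming
 * OCE_RDMA_VECTORS is 2): with 5 RQs and 4 WQs and RSS enabled,
 * req_vectors = MAX(5 - 1, 4) = 4 and tot_req_vectors = 6.  If
 * pci_alloc_msix() only grants 5, intr_count < 2 * OCE_RDMA_VECTORS is
 * false, so roce_intr_count stays 2 and the NIC keeps 5 - 2 = 3 vectors.
 */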
818 
819 
820 static int
821 oce_fast_isr(void *arg)
822 {
823 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
824 	POCE_SOFTC sc = ii->sc;
825 
826 	if (ii->eq == NULL)
827 		return FILTER_STRAY;
828 
829 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
830 
831 	taskqueue_enqueue(ii->tq, &ii->task);
832 
833  	ii->eq->intr++;
834 
835 	return FILTER_HANDLED;
836 }
837 
838 
839 static int
840 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
841 {
842 	POCE_INTR_INFO ii;
843 	int rc = 0, rr;
844 
845 	if (vector >= OCE_MAX_EQ)
846 		return (EINVAL);
847 
848 	ii = &sc->intrs[vector];
849 
850 	/* Set the resource id for the interrupt.
851 	 * MSIx is vector + 1 for the resource id,
852 	 * INTx is 0 for the resource id.
853 	 */
854 	if (sc->flags & OCE_FLAGS_USING_MSIX)
855 		rr = vector + 1;
856 	else
857 		rr = 0;
858 	ii->intr_res = bus_alloc_resource_any(sc->dev,
859 					      SYS_RES_IRQ,
860 					      &rr, RF_ACTIVE|RF_SHAREABLE);
861 	ii->irq_rr = rr;
862 	if (ii->intr_res == NULL) {
863 		device_printf(sc->dev,
864 			  "Could not allocate interrupt\n");
865 		rc = ENXIO;
866 		return rc;
867 	}
868 
869 	TASK_INIT(&ii->task, 0, isr, ii);
870 	ii->vector = vector;
871 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
872 	ii->tq = taskqueue_create_fast(ii->task_name,
873 			M_NOWAIT,
874 			taskqueue_thread_enqueue,
875 			&ii->tq);
876 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
877 			device_get_nameunit(sc->dev));
878 
879 	ii->sc = sc;
880 	rc = bus_setup_intr(sc->dev,
881 			ii->intr_res,
882 			INTR_TYPE_NET,
883 			oce_fast_isr, NULL, ii, &ii->tag);
884 	return rc;
885 
886 }
887 
888 
889 void
890 oce_intr_free(POCE_SOFTC sc)
891 {
892 	int i = 0;
893 
894 	for (i = 0; i < sc->intr_count; i++) {
895 
896 		if (sc->intrs[i].tag != NULL)
897 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
898 						sc->intrs[i].tag);
899 		if (sc->intrs[i].tq != NULL)
900 			taskqueue_free(sc->intrs[i].tq);
901 
902 		if (sc->intrs[i].intr_res != NULL)
903 			bus_release_resource(sc->dev, SYS_RES_IRQ,
904 						sc->intrs[i].irq_rr,
905 						sc->intrs[i].intr_res);
906 		sc->intrs[i].tag = NULL;
907 		sc->intrs[i].intr_res = NULL;
908 	}
909 
910 	if (sc->flags & OCE_FLAGS_USING_MSIX)
911 		pci_release_msi(sc->dev);
912 
913 }
914 
915 
916 
917 /******************************************************************************
918 *			  Media callbacks functions 			      *
919 ******************************************************************************/
920 
921 static void
922 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
923 {
924 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
925 
926 
927 	req->ifm_status = IFM_AVALID;
928 	req->ifm_active = IFM_ETHER;
929 
930 	if (sc->link_status == 1)
931 		req->ifm_status |= IFM_ACTIVE;
932 	else
933 		return;
934 
935 	switch (sc->link_speed) {
936 	case 1: /* 10 Mbps */
937 		req->ifm_active |= IFM_10_T | IFM_FDX;
938 		sc->speed = 10;
939 		break;
940 	case 2: /* 100 Mbps */
941 		req->ifm_active |= IFM_100_TX | IFM_FDX;
942 		sc->speed = 100;
943 		break;
944 	case 3: /* 1 Gbps */
945 		req->ifm_active |= IFM_1000_T | IFM_FDX;
946 		sc->speed = 1000;
947 		break;
948 	case 4: /* 10 Gbps */
949 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
950 		sc->speed = 10000;
951 		break;
952 	case 5: /* 20 Gbps */
953 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
954 		sc->speed = 20000;
955 		break;
956 	case 6: /* 25 Gbps */
957 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
958 		sc->speed = 25000;
959 		break;
960 	case 7: /* 40 Gbps */
961 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
962 		sc->speed = 40000;
963 		break;
964 	default:
965 		sc->speed = 0;
966 		break;
967 	}
968 
969 	return;
970 }
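
/*
 * Note: the 20 Gbps and 25 Gbps cases above fall back to IFM_10G_SR,
 * apparently because no exact ifmedia type was available when this was
 * written; the true speed is still recorded in sc->speed.
 */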
971 
972 
973 int
974 oce_media_change(struct ifnet *ifp)
975 {
976 	return 0;
977 }
978 
979 
980 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
981 				struct mbuf *m, boolean_t *os2bmc,
982 				struct mbuf **m_new)
983 {
984 	struct ether_header *eh = NULL;
985 
986 	eh = mtod(m, struct ether_header *);
987 
988 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
989 		*os2bmc = FALSE;
990 		goto done;
991 	}
992 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
993 		goto done;
994 
995 	if (is_mc_allowed_on_bmc(sc, eh) ||
996 	    is_bc_allowed_on_bmc(sc, eh) ||
997 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
998 		*os2bmc = TRUE;
999 		goto done;
1000 	}
1001 
1002 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
1003 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1004 		uint8_t nexthdr = ip6->ip6_nxt;
1005 		if (nexthdr == IPPROTO_ICMPV6) {
1006 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
1007 			switch (icmp6->icmp6_type) {
1008 			case ND_ROUTER_ADVERT:
1009 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
1010 				goto done;
1011 			case ND_NEIGHBOR_ADVERT:
1012 				*os2bmc = is_ipv6_na_filt_enabled(sc);
1013 				goto done;
1014 			default:
1015 				break;
1016 			}
1017 		}
1018 	}
1019 
1020 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
1021 		struct ip *ip = mtod(m, struct ip *);
1022 		int iphlen = ip->ip_hl << 2;
1023 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
1024 		switch (uh->uh_dport) {
1025 		case DHCP_CLIENT_PORT:
1026 			*os2bmc = is_dhcp_client_filt_enabled(sc);
1027 			goto done;
1028 		case DHCP_SERVER_PORT:
1029 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1030 			goto done;
1031 		case NET_BIOS_PORT1:
1032 		case NET_BIOS_PORT2:
1033 			*os2bmc = is_nbios_filt_enabled(sc);
1034 			goto done;
1035 		case DHCPV6_RAS_PORT:
1036 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1037 			goto done;
1038 		default:
1039 			break;
1040 		}
1041 	}
1042 done:
1043 	if (*os2bmc) {
1044 		*m_new = m_dup(m, M_NOWAIT);
1045 		if (!*m_new) {
1046 			*os2bmc = FALSE;
1047 			return;
1048 		}
1049 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1050 	}
1051 }
1052 
1053 
1054 
1055 /*****************************************************************************
1056  *			  Transmit routines functions			     *
1057  *****************************************************************************/
1058 
1059 static int
1060 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1061 {
1062 	int rc = 0, i, retry_cnt = 0;
1063 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1064 	struct mbuf *m, *m_temp, *m_new = NULL;
1065 	struct oce_wq *wq = sc->wq[wq_index];
1066 	struct oce_packet_desc *pd;
1067 	struct oce_nic_hdr_wqe *nichdr;
1068 	struct oce_nic_frag_wqe *nicfrag;
1069 	struct ether_header *eh = NULL;
1070 	int num_wqes;
1071 	uint32_t reg_value;
1072 	boolean_t complete = TRUE;
1073 	boolean_t os2bmc = FALSE;
1074 
1075 	m = *mpp;
1076 	if (!m)
1077 		return EINVAL;
1078 
1079 	if (!(m->m_flags & M_PKTHDR)) {
1080 		rc = ENXIO;
1081 		goto free_ret;
1082 	}
1083 
1084 	/* Don't allow non-TSO packets longer than MTU */
1085 	if (!is_tso_pkt(m)) {
1086 		eh = mtod(m, struct ether_header *);
1087 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1088 			 goto free_ret;
1089 	}
1090 
1091 	if(oce_tx_asic_stall_verify(sc, m)) {
1092 		m = oce_insert_vlan_tag(sc, m, &complete);
1093 		if(!m) {
1094 			device_printf(sc->dev, "Insertion unsuccessful\n");
1095 			return 0;
1096 		}
1097 
1098 	}
1099 
1100 	/* The Lancer and SH ASICs have a bug wherein packets of 32 bytes or
1101 	 * less may cause a transmit stall on that port.  The workaround is to
1102 	 * pad such short packets (<= 32 bytes) to a 36-byte length.
1103 	 */
1104 	if(IS_SH(sc) || IS_XE201(sc) ) {
1105 		if(m->m_pkthdr.len <= 32) {
1106 			char buf[36];
1107 			bzero((void *)buf, 36);
1108 			m_append(m, (36 - m->m_pkthdr.len), buf);
1109 		}
1110 	}
1111 
1112 tx_start:
1113 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1114 		/* consolidate packet buffers for TSO/LSO segment offload */
1115 #if defined(INET6) || defined(INET)
1116 		m = oce_tso_setup(sc, mpp);
1117 #else
1118 		m = NULL;
1119 #endif
1120 		if (m == NULL) {
1121 			rc = ENXIO;
1122 			goto free_ret;
1123 		}
1124 	}
1125 
1126 
1127 	pd = &wq->pckts[wq->pkt_desc_head];
1128 
1129 retry:
1130 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1131 				     pd->map,
1132 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1133 	if (rc == 0) {
1134 		num_wqes = pd->nsegs + 1;
1135 		if (IS_BE(sc) || IS_SH(sc)) {
1136 			/* Dummy WQE required only for BE3. */
1137 			if (num_wqes & 1)
1138 				num_wqes++;
1139 		}
1140 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1141 			bus_dmamap_unload(wq->tag, pd->map);
1142 			return EBUSY;
1143 		}
1144 		atomic_store_rel_int(&wq->pkt_desc_head,
1145 				     (wq->pkt_desc_head + 1) % \
1146 				      OCE_WQ_PACKET_ARRAY_SIZE);
1147 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1148 		pd->mbuf = m;
1149 
1150 		nichdr =
1151 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1152 		nichdr->u0.dw[0] = 0;
1153 		nichdr->u0.dw[1] = 0;
1154 		nichdr->u0.dw[2] = 0;
1155 		nichdr->u0.dw[3] = 0;
1156 
1157 		nichdr->u0.s.complete = complete;
1158 		nichdr->u0.s.mgmt = os2bmc;
1159 		nichdr->u0.s.event = 1;
1160 		nichdr->u0.s.crc = 1;
1161 		nichdr->u0.s.forward = 0;
1162 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1163 		nichdr->u0.s.udpcs =
1164 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1165 		nichdr->u0.s.tcpcs =
1166 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1167 		nichdr->u0.s.num_wqe = num_wqes;
1168 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1169 
1170 		if (m->m_flags & M_VLANTAG) {
1171 			nichdr->u0.s.vlan = 1; /* VLAN tag present */
1172 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1173 		}
1174 
1175 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1176 			if (m->m_pkthdr.tso_segsz) {
1177 				nichdr->u0.s.lso = 1;
1178 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1179 			}
1180 			if (!IS_BE(sc) && !IS_SH(sc))
1181 				nichdr->u0.s.ipcs = 1;
1182 		}
1183 
1184 		RING_PUT(wq->ring, 1);
1185 		atomic_add_int(&wq->ring->num_used, 1);
1186 
1187 		for (i = 0; i < pd->nsegs; i++) {
1188 			nicfrag =
1189 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1190 						      struct oce_nic_frag_wqe);
1191 			nicfrag->u0.s.rsvd0 = 0;
1192 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1193 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1194 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1195 			pd->wqe_idx = wq->ring->pidx;
1196 			RING_PUT(wq->ring, 1);
1197 			atomic_add_int(&wq->ring->num_used, 1);
1198 		}
1199 		if (num_wqes > (pd->nsegs + 1)) {
1200 			nicfrag =
1201 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1202 						      struct oce_nic_frag_wqe);
1203 			nicfrag->u0.dw[0] = 0;
1204 			nicfrag->u0.dw[1] = 0;
1205 			nicfrag->u0.dw[2] = 0;
1206 			nicfrag->u0.dw[3] = 0;
1207 			pd->wqe_idx = wq->ring->pidx;
1208 			RING_PUT(wq->ring, 1);
1209 			atomic_add_int(&wq->ring->num_used, 1);
1210 			pd->nsegs++;
1211 		}
1212 
1213 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1214 		wq->tx_stats.tx_reqs++;
1215 		wq->tx_stats.tx_wrbs += num_wqes;
1216 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1217 		wq->tx_stats.tx_pkts++;
1218 
1219 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1220 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1221 		reg_value = (num_wqes << 16) | wq->wq_id;
1222 
1223 		/* oce_is_pkt_dest_bmc() does nothing if os2bmc is not enabled
1224 		   or if the packet is already tagged for the BMC.
1225 		 */
1226 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1227 
1228 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1229 
1230 	} else if (rc == EFBIG)	{
1231 		if (retry_cnt == 0) {
1232 			m_temp = m_defrag(m, M_NOWAIT);
1233 			if (m_temp == NULL)
1234 				goto free_ret;
1235 			m = m_temp;
1236 			*mpp = m_temp;
1237 			retry_cnt = retry_cnt + 1;
1238 			goto retry;
1239 		} else
1240 			goto free_ret;
1241 	} else if (rc == ENOMEM)
1242 		return rc;
1243 	else
1244 		goto free_ret;
1245 
1246 	if (os2bmc) {
1247 		m = m_new;
1248 		goto tx_start;
1249 	}
1250 
1251 	return 0;
1252 
1253 free_ret:
1254 	m_freem(*mpp);
1255 	*mpp = NULL;
1256 	return rc;
1257 }
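
/*
 * Worked example of the WQE accounting in oce_tx() (illustrative): a
 * packet that maps to 3 DMA segments needs 1 header WQE + 3 fragment
 * WQEs = 4 WQEs.  On BE/SH a 2-segment packet would need 3 WQEs, which
 * is odd, so a fourth all-zero dummy WQE is appended and pd->nsegs is
 * bumped so completion accounting stays in step.
 */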
1258 
1259 
1260 static void
1261 oce_process_tx_completion(struct oce_wq *wq)
1262 {
1263 	struct oce_packet_desc *pd;
1264 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1265 	struct mbuf *m;
1266 
1267 	pd = &wq->pckts[wq->pkt_desc_tail];
1268 	atomic_store_rel_int(&wq->pkt_desc_tail,
1269 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1270 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1271 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1272 	bus_dmamap_unload(wq->tag, pd->map);
1273 
1274 	m = pd->mbuf;
1275 	m_freem(m);
1276 	pd->mbuf = NULL;
1277 
1278 
1279 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1280 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1281 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1282 			oce_tx_restart(sc, wq);
1283 		}
1284 	}
1285 }
1286 
1287 
1288 static void
1289 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1290 {
1291 
1292 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1293 		return;
1294 
1295 	if (!drbr_empty(sc->ifp, wq->br))
1296 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1297 
1298 }
1299 
1300 
1301 #if defined(INET6) || defined(INET)
1302 static struct mbuf *
1303 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1304 {
1305 	struct mbuf *m;
1306 #ifdef INET
1307 	struct ip *ip;
1308 #endif
1309 #ifdef INET6
1310 	struct ip6_hdr *ip6;
1311 #endif
1312 	struct ether_vlan_header *eh;
1313 	struct tcphdr *th;
1314 	uint16_t etype;
1315 	int total_len = 0, ehdrlen = 0;
1316 
1317 	m = *mpp;
1318 
1319 	if (M_WRITABLE(m) == 0) {
1320 		m = m_dup(*mpp, M_NOWAIT);
1321 		if (!m)
1322 			return NULL;
1323 		m_freem(*mpp);
1324 		*mpp = m;
1325 	}
1326 
1327 	eh = mtod(m, struct ether_vlan_header *);
1328 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1329 		etype = ntohs(eh->evl_proto);
1330 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1331 	} else {
1332 		etype = ntohs(eh->evl_encap_proto);
1333 		ehdrlen = ETHER_HDR_LEN;
1334 	}
1335 
1336 	switch (etype) {
1337 #ifdef INET
1338 	case ETHERTYPE_IP:
1339 		ip = (struct ip *)(m->m_data + ehdrlen);
1340 		if (ip->ip_p != IPPROTO_TCP)
1341 			return NULL;
1342 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1343 
1344 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1345 		break;
1346 #endif
1347 #ifdef INET6
1348 	case ETHERTYPE_IPV6:
1349 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1350 		if (ip6->ip6_nxt != IPPROTO_TCP)
1351 			return NULL;
1352 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1353 
1354 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1355 		break;
1356 #endif
1357 	default:
1358 		return NULL;
1359 	}
1360 
1361 	m = m_pullup(m, total_len);
1362 	if (!m)
1363 		return NULL;
1364 	*mpp = m;
1365 	return m;
1366 
1367 }
1368 #endif /* INET6 || INET */
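
/*
 * Worked example for oce_tso_setup() (illustrative): an untagged IPv4/TCP
 * frame with no IP or TCP options gives total_len = ehdrlen +
 * (ip_hl << 2) + (th_off << 2) = 14 + 20 + 20 = 54 bytes, which
 * m_pullup() makes contiguous in the first mbuf before DMA mapping.
 */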
1369 
1370 void
1371 oce_tx_task(void *arg, int npending)
1372 {
1373 	struct oce_wq *wq = arg;
1374 	POCE_SOFTC sc = wq->parent;
1375 	struct ifnet *ifp = sc->ifp;
1376 	int rc = 0;
1377 
1378 	LOCK(&wq->tx_lock);
1379 	rc = oce_multiq_transmit(ifp, NULL, wq);
1380 	if (rc) {
1381 		device_printf(sc->dev,
1382 				"TX[%d] restart failed\n", wq->queue_index);
1383 	}
1384 	UNLOCK(&wq->tx_lock);
1385 }
1386 
1387 
1388 void
1389 oce_start(struct ifnet *ifp)
1390 {
1391 	POCE_SOFTC sc = ifp->if_softc;
1392 	struct mbuf *m;
1393 	int rc = 0;
1394 	int def_q = 0; /* Default TX queue is 0 */
1395 
1396 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1397 			IFF_DRV_RUNNING)
1398 		return;
1399 
1400 	if (!sc->link_status)
1401 		return;
1402 
1403 	do {
1404 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1405 		if (m == NULL)
1406 			break;
1407 
1408 		LOCK(&sc->wq[def_q]->tx_lock);
1409 		rc = oce_tx(sc, &m, def_q);
1410 		UNLOCK(&sc->wq[def_q]->tx_lock);
1411 		if (rc) {
1412 			if (m != NULL) {
1413 				sc->wq[def_q]->tx_stats.tx_stops++;
1414 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1415 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1416 				m = NULL;
1417 			}
1418 			break;
1419 		}
1420 		if (m != NULL)
1421 			ETHER_BPF_MTAP(ifp, m);
1422 
1423 	} while (TRUE);
1424 
1425 	return;
1426 }
1427 
1428 
1429 /* Handle the Completion Queue for transmit */
1430 uint16_t
1431 oce_wq_handler(void *arg)
1432 {
1433 	struct oce_wq *wq = (struct oce_wq *)arg;
1434 	POCE_SOFTC sc = wq->parent;
1435 	struct oce_cq *cq = wq->cq;
1436 	struct oce_nic_tx_cqe *cqe;
1437 	int num_cqes = 0;
1438 
1439 	LOCK(&wq->tx_compl_lock);
1440 	bus_dmamap_sync(cq->ring->dma.tag,
1441 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1442 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1443 	while (cqe->u0.dw[3]) {
1444 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1445 
1446 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1447 		if (wq->ring->cidx >= wq->ring->num_items)
1448 			wq->ring->cidx -= wq->ring->num_items;
1449 
1450 		oce_process_tx_completion(wq);
1451 		wq->tx_stats.tx_compl++;
1452 		cqe->u0.dw[3] = 0;
1453 		RING_GET(cq->ring, 1);
1454 		bus_dmamap_sync(cq->ring->dma.tag,
1455 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1456 		cqe =
1457 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1458 		num_cqes++;
1459 	}
1460 
1461 	if (num_cqes)
1462 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1463 
1464 	UNLOCK(&wq->tx_compl_lock);
1465 	return num_cqes;
1466 }
1467 
1468 
1469 static int
1470 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1471 {
1472 	POCE_SOFTC sc = ifp->if_softc;
1473 	int status = 0, queue_index = 0;
1474 	struct mbuf *next = NULL;
1475 	struct buf_ring *br = NULL;
1476 
1477 	br  = wq->br;
1478 	queue_index = wq->queue_index;
1479 
1480 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1481 		IFF_DRV_RUNNING) {
1482 		if (m != NULL)
1483 			status = drbr_enqueue(ifp, br, m);
1484 		return status;
1485 	}
1486 
1487 	if (m != NULL) {
1488 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1489 			return status;
1490 	}
1491 	while ((next = drbr_peek(ifp, br)) != NULL) {
1492 		if (oce_tx(sc, &next, queue_index)) {
1493 			if (next == NULL) {
1494 				drbr_advance(ifp, br);
1495 			} else {
1496 				drbr_putback(ifp, br, next);
1497 				wq->tx_stats.tx_stops++;
1498 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1499 			}
1500 			break;
1501 		}
1502 		drbr_advance(ifp, br);
1503 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1504 		if (next->m_flags & M_MCAST)
1505 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1506 		ETHER_BPF_MTAP(ifp, next);
1507 	}
1508 
1509 	return 0;
1510 }
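
/*
 * Note on the drbr(9) contract relied on above: drbr_peek() returns the
 * head of the ring without consuming it.  On a failed oce_tx() the mbuf
 * was either freed (next is NULL, so drbr_advance() discards the stale
 * head) or left intact (drbr_putback() reinstalls it for a later retry);
 * only after a successful transmit is the peeked head advanced past.
 */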
1511 
1512 
1513 
1514 
1515 /*****************************************************************************
1516  *			    Receive  routines functions 		     *
1517  *****************************************************************************/
1518 
1519 static void
1520 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1521 {
1522 	uint32_t *p;
1523         struct ether_header *eh = NULL;
1524         struct tcphdr *tcp_hdr = NULL;
1525         struct ip *ip4_hdr = NULL;
1526         struct ip6_hdr *ip6 = NULL;
1527         uint32_t payload_len = 0;
1528 
1529         eh = mtod(m, struct ether_header *);
1530         /* correct IP header */
1531         if(!cqe2->ipv6_frame) {
1532 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1533                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1534                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1535                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1536         } else {
1537         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1538                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1539                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1540                                                 - sizeof(struct ip6_hdr);
1541                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1542                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1543         }
1544 
1545         /* correct tcp header */
1546         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1547         if(cqe2->push) {
1548         	tcp_hdr->th_flags |= TH_PUSH;
1549         }
1550         tcp_hdr->th_win = htons(cqe2->tcp_window);
1551         tcp_hdr->th_sum = 0xffff;
1552         if(cqe2->ts_opt) {
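                /* Assumes the timestamp is the first TCP option, so its
                 * value/echo words start two bytes (kind + length) past
                 * the header. */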
1553                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1554                 *p = cqe1->tcp_timestamp_val;
1555                 *(p+1) = cqe1->tcp_timestamp_ecr;
1556         }
1557 
1558 	return;
1559 }
1560 
1561 static void
1562 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1563 {
1564 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1565         uint32_t i = 0, frag_len = 0;
1566 	uint32_t len = cqe_info->pkt_size;
1567         struct oce_packet_desc *pd;
1568         struct mbuf *tail = NULL;
1569 
1570         for (i = 0; i < cqe_info->num_frags; i++) {
1571                 if (rq->ring->cidx == rq->ring->pidx) {
1572                         device_printf(sc->dev,
1573                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1574                         return;
1575                 }
1576                 pd = &rq->pckts[rq->ring->cidx];
1577 
1578                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1579                 bus_dmamap_unload(rq->tag, pd->map);
1580 		RING_GET(rq->ring, 1);
1581                 rq->pending--;
1582 
1583 				} else {
1584                 pd->mbuf->m_len = frag_len;
1585 
1586                 if (tail != NULL) {
1587                         /* additional fragments */
1588                         pd->mbuf->m_flags &= ~M_PKTHDR;
1589                         tail->m_next = pd->mbuf;
1590 			if(rq->islro)
1591                         	tail->m_nextpkt = NULL;
1592                         tail = pd->mbuf;
1593                 } else {
1594                         /* first fragment, fill out much of the packet header */
1595                         pd->mbuf->m_pkthdr.len = len;
1596 			if(rq->islro)
1597                         	pd->mbuf->m_nextpkt = NULL;
1598                         pd->mbuf->m_pkthdr.csum_flags = 0;
1599                         if (IF_CSUM_ENABLED(sc)) {
1600                                 if (cqe_info->l4_cksum_pass) {
1601                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1602                                                 pd->mbuf->m_pkthdr.csum_flags |=
1603                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1604                                         } else { /* IPv6 frame */
1605 						if(rq->islro) {
1606                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1607                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1608 						}
1609                                         }
1610                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1611                                 }
1612                                 if (cqe_info->ip_cksum_pass) {
1613                                         pd->mbuf->m_pkthdr.csum_flags |=
1614                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1615                                 }
1616                         }
1617                         *m = tail = pd->mbuf;
1618                }
1619                 pd->mbuf = NULL;
1620                 len -= frag_len;
1621         }
1622 
1623         return;
1624 }
1625 
1626 static void
1627 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1628 {
1629         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1630         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1631         struct mbuf *m = NULL;
1632 	struct oce_common_cqe_info cq_info;
1633 
1634 	/* parse cqe */
1635         if(cqe2 == NULL) {
1636                 cq_info.pkt_size =  cqe->pkt_size;
1637                 cq_info.vtag = cqe->vlan_tag;
1638                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1639                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1640                 cq_info.ipv6_frame = cqe->ipv6_frame;
1641                 cq_info.vtp = cqe->vtp;
1642                 cq_info.qnq = cqe->qnq;
1643         } else {
1644                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1645                 cq_info.pkt_size =  cqe2->coalesced_size;
1646                 cq_info.vtag = cqe2->vlan_tag;
1647                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1648                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1649                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1650                 cq_info.vtp = cqe2->vtp;
1651                 cq_info.qnq = cqe1->qnq;
1652         }
1653 
1654 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1655 
1656         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1657         if(cq_info.pkt_size % rq->cfg.frag_size)
1658                 cq_info.num_frags++;
1659 
1660 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1661 
1662 	if (m) {
1663 		if(cqe2) {
1664 			//assert(cqe2->valid != 0);
1665 
1666 			//assert(cqe2->cqe_type != 2);
1667 			oce_correct_header(m, cqe1, cqe2);
1668 		}
1669 
1670 		m->m_pkthdr.rcvif = sc->ifp;
1671 		if (rq->queue_index)
1672 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1673 		else
1674 			m->m_pkthdr.flowid = rq->queue_index;
1675 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1676 
1677 		/* This determines if the VLAN tag is valid */
1678 		if (cq_info.vtp) {
1679 			if (sc->function_mode & FNM_FLEX10_MODE) {
1680 				/* FLEX10. If QnQ is not set, neglect VLAN */
1681 				if (cq_info.qnq) {
1682 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1683 					m->m_flags |= M_VLANTAG;
1684 				}
1685 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1686 				/* In UMC mode the pvid is generally stripped by
1687 				   hw, but in some cases a frame still arrives
1688 				   with it.  If pvid == vlan, ignore the vlan tag.
1689 				 */
1690 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1691 				m->m_flags |= M_VLANTAG;
1692 			}
1693 		}
1694 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1695 
1696 		(*sc->ifp->if_input) (sc->ifp, m);
1697 
1698 		/* Update rx stats per queue */
1699 		rq->rx_stats.rx_pkts++;
1700 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1701 		rq->rx_stats.rx_frags += cq_info.num_frags;
1702 		rq->rx_stats.rx_ucast_pkts++;
1703 	}
1704         return;
1705 }
1706 
1707 static void
1708 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1709 {
1710 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1711 	int len;
1712 	struct mbuf *m = NULL;
1713 	struct oce_common_cqe_info cq_info;
1714 	uint16_t vtag = 0;
1715 
1716 	/* Is it a flush compl that has no data */
1717 	if(!cqe->u0.s.num_fragments)
1718 		goto exit;
1719 
1720 	len = cqe->u0.s.pkt_size;
1721 	if (!len) {
1722 		/*partial DMA workaround for Lancer*/
1723 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1724 		goto exit;
1725 	}
1726 
1727 	if (!oce_cqe_portid_valid(sc, cqe)) {
1728 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1729 		goto exit;
1730 	}
1731 
1732 	 /* Get vlan_tag value */
1733 	if(IS_BE(sc) || IS_SH(sc))
1734 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1735 	else
1736 		vtag = cqe->u0.s.vlan_tag;
1737 
1738 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1739 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1740 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1741 	cq_info.num_frags = cqe->u0.s.num_fragments;
1742 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1743 
1744 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1745 
1746 	if (m) {
1747 		m->m_pkthdr.rcvif = sc->ifp;
1748 		if (rq->queue_index)
1749 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1750 		else
1751 			m->m_pkthdr.flowid = rq->queue_index;
1752 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1753 
1754 		/* This determines if the VLAN tag is valid */
1755 		if (oce_cqe_vtp_valid(sc, cqe)) {
1756 			if (sc->function_mode & FNM_FLEX10_MODE) {
1757 				/* FLEX10. If QnQ is not set, neglect VLAN */
1758 				if (cqe->u0.s.qnq) {
1759 					m->m_pkthdr.ether_vtag = vtag;
1760 					m->m_flags |= M_VLANTAG;
1761 				}
1762 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1763 				/* In UMC mode the pvid is generally stripped by
1764 				   hw, but in some cases a frame still arrives
1765 				   with it.  If pvid == vlan, ignore the vlan tag.
1766 				 */
1767 				m->m_pkthdr.ether_vtag = vtag;
1768 				m->m_flags |= M_VLANTAG;
1769 			}
1770 		}
1771 
1772 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1773 #if defined(INET6) || defined(INET)
1774 		/* Try to queue to LRO */
1775 		if (IF_LRO_ENABLED(sc) &&
1776 		    (cqe->u0.s.ip_cksum_pass) &&
1777 		    (cqe->u0.s.l4_cksum_pass) &&
1778 		    (!cqe->u0.s.ip_ver)       &&
1779 		    (rq->lro.lro_cnt != 0)) {
1780 
1781 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1782 				rq->lro_pkts_queued ++;
1783 				goto post_done;
1784 			}
1785 			/* If LRO posting fails then try to post to STACK */
1786 		}
1787 #endif
1788 
1789 		(*sc->ifp->if_input) (sc->ifp, m);
1790 #if defined(INET6) || defined(INET)
1791 post_done:
1792 #endif
1793 		/* Update rx stats per queue */
1794 		rq->rx_stats.rx_pkts++;
1795 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1796 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1797 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1798 			rq->rx_stats.rx_mcast_pkts++;
1799 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1800 			rq->rx_stats.rx_ucast_pkts++;
1801 	}
1802 exit:
1803 	return;
1804 }
1805 
1806 
1807 void
1808 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1809 {
1810 	uint32_t i = 0;
1811 	struct oce_packet_desc *pd;
1812 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1813 
1814 	for (i = 0; i < num_frags; i++) {
1815                 if (rq->ring->cidx == rq->ring->pidx) {
1816                         device_printf(sc->dev,
1817                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1818                         return;
1819                 }
1820                 pd = &rq->pckts[rq->ring->cidx];
1821                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1822                 bus_dmamap_unload(rq->tag, pd->map);
1823                 if (pd->mbuf != NULL) {
1824                         m_freem(pd->mbuf);
1825                         pd->mbuf = NULL;
1826                 }
1827 
1828 		RING_GET(rq->ring, 1);
1829                 rq->pending--;
1830 	}
1831 }
1832 
1833 
1834 static int
1835 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1836 {
1837 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1838 	int vtp = 0;
1839 
1840 	if (sc->be3_native) {
1841 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1842 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1843 	} else
1844 		vtp = cqe->u0.s.vlan_tag_present;
1845 
1846 	return vtp;
1847 
1848 }
1849 
1850 
1851 static int
1852 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1853 {
1854 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1855 	int port_id = 0;
1856 
1857 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1858 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1859 		port_id =  cqe_v1->u0.s.port;
1860 		if (sc->port_id != port_id)
1861 			return 0;
1862 	} else
1863 		; /* For legacy BE3 and Lancer this check is a no-op */
1864 
1865 	return 1;
1866 
1867 }
1868 
1869 #if defined(INET6) || defined(INET)
1870 void
1871 oce_rx_flush_lro(struct oce_rq *rq)
1872 {
1873 	struct lro_ctrl	*lro = &rq->lro;
1874 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1875 
1876 	if (!IF_LRO_ENABLED(sc))
1877 		return;
1878 
1879 	tcp_lro_flush_all(lro);
1880 	rq->lro_pkts_queued = 0;
1881 
1882 	return;
1883 }
1884 
1885 
1886 static int
1887 oce_init_lro(POCE_SOFTC sc)
1888 {
1889 	struct lro_ctrl *lro = NULL;
1890 	int i = 0, rc = 0;
1891 
1892 	for (i = 0; i < sc->nrqs; i++) {
1893 		lro = &sc->rq[i]->lro;
1894 		rc = tcp_lro_init(lro);
1895 		if (rc != 0) {
1896 			device_printf(sc->dev, "LRO init failed\n");
1897 			return rc;
1898 		}
1899 		lro->ifp = sc->ifp;
1900 	}
1901 
1902 	return rc;
1903 }
1904 
1905 
1906 void
1907 oce_free_lro(POCE_SOFTC sc)
1908 {
1909 	struct lro_ctrl *lro = NULL;
1910 	int i = 0;
1911 
1912 	for (i = 0; i < sc->nrqs; i++) {
1913 		lro = &sc->rq[i]->lro;
1914 		if (lro)
1915 			tcp_lro_free(lro);
1916 	}
1917 }
1918 #endif
1919 
1920 int
1921 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1922 {
1923 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1924 	int i, in, rc;
1925 	struct oce_packet_desc *pd;
1926 	bus_dma_segment_t segs[6];
1927 	int nsegs, added = 0;
1928 	struct oce_nic_rqe *rqe;
1929 	pd_rxulp_db_t rxdb_reg;
1930 	uint32_t val = 0;
1931 	uint32_t oce_max_rq_posts = 64;
1932 
1933 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1934 	for (i = 0; i < count; i++) {
1935 		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1936 
1937 		pd = &rq->pckts[rq->ring->pidx];
1938 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1939 		if (pd->mbuf == NULL) {
1940 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1941 			break;
1942 		}
1943 		pd->mbuf->m_nextpkt = NULL;
1944 
1945 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1946 
1947 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1948 					     pd->map,
1949 					     pd->mbuf,
1950 					     segs, &nsegs, BUS_DMA_NOWAIT);
1951 		if (rc) {
1952 			m_free(pd->mbuf);
1953 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1954 			break;
1955 		}
1956 
1957 		if (nsegs != 1) {
1958 			i--;
1959 			continue;
1960 		}
1961 
1962 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1963 
1964 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1965 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1966 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1967 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1968 		RING_PUT(rq->ring, 1);
1969 		added++;
1970 		rq->pending++;
1971 	}
	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
	if (added != 0) {
		for (i = added / oce_max_rq_posts; i > 0; i--) {
			rxdb_reg.bits.num_posted = oce_max_rq_posts;
			rxdb_reg.bits.qid = rq->rq_id;
			if (rq->islro) {
				/* Rebuild the doorbell value on every write so
				 * a previous chunk's num_posted bits cannot
				 * leak into this one.
				 */
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= oce_max_rq_posts << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
			added -= oce_max_rq_posts;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			if (rq->islro) {
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= added << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
		}
	}
1998 
1999 	return 0;
2000 }
2001 
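/*
 * Re-arm the RQ's CQ with the number of completions consumed and top up
 * the receive ring; the hardware-LRO path replenishes in fixed
 * 64-buffer batches.
 */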
static void
oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
{
	if (num_cqes) {
		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
		if (!sc->enable_hwlro) {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
				oce_alloc_rx_bufs(rq,
				    (OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1);
		} else {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
				oce_alloc_rx_bufs(rq, 64);
		}
	}
}
2018 
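/*
 * Hardware-LRO receive handler.  The adapter posts either a singleton
 * CQE (cqe_type 0) describing one packet, or a pair of CQEs for a
 * coalesced packet: part1 (cqe_type 1) followed by part2 (cqe_type 2).
 * part1 is parked in rq->cqe_firstpart until its part2 arrives.
 */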
2019 uint16_t
2020 oce_rq_handler_lro(void *arg)
2021 {
2022         struct oce_rq *rq = (struct oce_rq *)arg;
2023         struct oce_cq *cq = rq->cq;
2024         POCE_SOFTC sc = rq->parent;
2025         struct nic_hwlro_singleton_cqe *cqe;
2026         struct nic_hwlro_cqe_part2 *cqe2;
2027         int num_cqes = 0;
2028 
2029 	LOCK(&rq->rx_lock);
2030         bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2031         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2032         while (cqe->valid) {
		if (cqe->cqe_type == 0) { /* singleton cqe */
			/* we should not get a singleton cqe after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
				goto exit_rq_handler_lro;
			}
2039                         if(cqe->error != 0) {
2040                                 rq->rx_stats.rxcp_err++;
2041 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2042                         }
2043                         oce_rx_lro(rq, cqe, NULL);
2044                         rq->rx_stats.rx_compl++;
2045                         cqe->valid = 0;
2046                         RING_GET(cq->ring, 1);
2047                         num_cqes++;
2048                         if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2049                                 break;
		} else if (cqe->cqe_type == 0x1) { /* first part */
			/* we should not get cqe1 after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1\n");
				goto exit_rq_handler_lro;
			}
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
			RING_GET(cq->ring, 1);
		} else if (cqe->cqe_type == 0x2) { /* second part */
2059 			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
2060                         if(cqe2->error != 0) {
2061                                 rq->rx_stats.rxcp_err++;
2062 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2063                         }
2064 			/* We should not get cqe2 without cqe1 */
2065 			if(rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1\n");
2067 				goto exit_rq_handler_lro;
2068 			}
2069                         oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2070 
2071                         rq->rx_stats.rx_compl++;
2072                         rq->cqe_firstpart->valid = 0;
2073                         cqe2->valid = 0;
2074 			rq->cqe_firstpart = NULL;
2075 
2076                         RING_GET(cq->ring, 1);
2077                         num_cqes += 2;
2078                         if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2079                                 break;
2080 		}
2081 
2082                 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2083                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2084         }
2085 	oce_check_rx_bufs(sc, num_cqes, rq);
2086 exit_rq_handler_lro:
2087 	UNLOCK(&rq->rx_lock);
2088 	return 0;
2089 }
2090 
2091 /* Handle the Completion Queue for receive */
2092 uint16_t
2093 oce_rq_handler(void *arg)
2094 {
2095 	struct oce_rq *rq = (struct oce_rq *)arg;
2096 	struct oce_cq *cq = rq->cq;
2097 	POCE_SOFTC sc = rq->parent;
2098 	struct oce_nic_rx_cqe *cqe;
2099 	int num_cqes = 0;
2100 
2101 	if(rq->islro) {
2102 		oce_rq_handler_lro(arg);
2103 		return 0;
2104 	}
2105 	LOCK(&rq->rx_lock);
2106 	bus_dmamap_sync(cq->ring->dma.tag,
2107 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2108 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2109 	while (cqe->u0.dw[2]) {
2110 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2111 
2112 		if (cqe->u0.s.error == 0) {
2113 			oce_rx(rq, cqe);
2114 		} else {
2115 			rq->rx_stats.rxcp_err++;
2116 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			/* Post L3/L4 errors to the stack. */
2118 			oce_rx(rq, cqe);
2119 		}
2120 		rq->rx_stats.rx_compl++;
2121 		cqe->u0.dw[2] = 0;
2122 
2123 #if defined(INET6) || defined(INET)
2124 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2125 			oce_rx_flush_lro(rq);
2126 		}
2127 #endif
2128 
2129 		RING_GET(cq->ring, 1);
2130 		bus_dmamap_sync(cq->ring->dma.tag,
2131 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2132 		cqe =
2133 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2134 		num_cqes++;
2135 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2136 			break;
2137 	}
2138 
2139 #if defined(INET6) || defined(INET)
2140         if (IF_LRO_ENABLED(sc))
2141                 oce_rx_flush_lro(rq);
2142 #endif
2143 
2144 	oce_check_rx_bufs(sc, num_cqes, rq);
2145 	UNLOCK(&rq->rx_lock);
2146 	return 0;
2147 
2148 }
2149 
2150 
2151 
2152 
/*****************************************************************************
 *			Helper functions in this file			     *
 *****************************************************************************/
2156 
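/*
 * Allocate the ifnet, wire up the driver entry points and media
 * handlers, advertise checksum/TSO/LRO/VLAN capabilities, and attach
 * the Ethernet layer.
 */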
2157 static int
2158 oce_attach_ifp(POCE_SOFTC sc)
2159 {
2160 
2161 	sc->ifp = if_alloc(IFT_ETHER);
2162 	if (!sc->ifp)
2163 		return ENOMEM;
2164 
2165 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2166 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2167 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2168 
2169 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
2170 	sc->ifp->if_ioctl = oce_ioctl;
2171 	sc->ifp->if_start = oce_start;
2172 	sc->ifp->if_init = oce_init;
2173 	sc->ifp->if_mtu = ETHERMTU;
2174 	sc->ifp->if_softc = sc;
2175 	sc->ifp->if_transmit = oce_multiq_start;
2176 	sc->ifp->if_qflush = oce_multiq_flush;
2177 
2178 	if_initname(sc->ifp,
2179 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2180 
2181 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2182 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2183 	IFQ_SET_READY(&sc->ifp->if_snd);
2184 
2185 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2186 	sc->ifp->if_hwassist |= CSUM_TSO;
2187 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2188 
2189 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2190 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2191 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2192 
2193 #if defined(INET6) || defined(INET)
2194 	sc->ifp->if_capabilities |= IFCAP_TSO;
2195 	sc->ifp->if_capabilities |= IFCAP_LRO;
2196 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2197 #endif
2198 
2199 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2200 	sc->ifp->if_baudrate = IF_Gbps(10);
2201 
2202 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2203 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2204 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2205 
2206 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2207 
2208 	return 0;
2209 }
2210 
2211 
2212 static void
2213 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2214 {
2215 	POCE_SOFTC sc = ifp->if_softc;
2216 
2217 	if (ifp->if_softc !=  arg)
2218 		return;
2219 	if ((vtag == 0) || (vtag > 4095))
2220 		return;
2221 
2222 	sc->vlan_tag[vtag] = 1;
2223 	sc->vlans_added++;
2224 	if (sc->vlans_added <= (sc->max_vlans + 1))
2225 		oce_vid_config(sc);
2226 }
2227 
2228 
2229 static void
2230 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2231 {
2232 	POCE_SOFTC sc = ifp->if_softc;
2233 
2234 	if (ifp->if_softc !=  arg)
2235 		return;
2236 	if ((vtag == 0) || (vtag > 4095))
2237 		return;
2238 
2239 	sc->vlan_tag[vtag] = 0;
2240 	sc->vlans_added--;
2241 	oce_vid_config(sc);
2242 }
2243 
2244 
2245 /*
2246  * A max of 64 vlans can be configured in BE. If the user configures
2247  * more, place the card in vlan promiscuous mode.
2248  */
2249 static int
2250 oce_vid_config(POCE_SOFTC sc)
2251 {
2252 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2253 	uint16_t ntags = 0, i;
2254 	int status = 0;
2255 
2256 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2257 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2258 		for (i = 0; i < MAX_VLANS; i++) {
2259 			if (sc->vlan_tag[i]) {
2260 				vtags[ntags].vtag = i;
2261 				ntags++;
2262 			}
2263 		}
2264 		if (ntags)
2265 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2266 						vtags, ntags, 1, 0);
2267 	} else
2268 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2269 					 	NULL, 0, 1, 1);
2270 	return status;
2271 }
2272 
2273 
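/*
 * Propagate an administratively changed MAC address to the hardware.
 * The new address is added before the old pmac entry is deleted, so the
 * interface is never left without a valid unicast filter.
 */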
2274 static void
2275 oce_mac_addr_set(POCE_SOFTC sc)
2276 {
2277 	uint32_t old_pmac_id = sc->pmac_id;
2278 	int status = 0;
2279 
2280 
2281 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2282 			 sc->macaddr.size_of_struct);
2283 	if (!status)
2284 		return;
2285 
2286 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2287 					sc->if_id, &sc->pmac_id);
2288 	if (!status) {
2289 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2290 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2291 				 sc->macaddr.size_of_struct);
2292 	}
2293 	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
2295 
2296 }
2297 
2298 
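/*
 * Pass-through for privileged user-space mailbox commands.  The user
 * buffer is expected to look like:
 *
 *	+--------------+---------------------------------+
 *	| IOCTL_COOKIE | struct mbx_hdr | command payload |
 *	+--------------+---------------------------------+
 *
 * The cookie is validated, the command is bounced through a DMA buffer
 * into the firmware mailbox, and the response is copied back out.
 */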
2299 static int
2300 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2301 {
2302 	POCE_SOFTC sc = ifp->if_softc;
2303 	struct ifreq *ifr = (struct ifreq *)data;
2304 	int rc = ENXIO;
2305 	char cookie[32] = {0};
2306 	void *priv_data = ifr_data_get_ptr(ifr);
2307 	void *ioctl_ptr;
2308 	uint32_t req_size;
2309 	struct mbx_hdr req;
2310 	OCE_DMA_MEM dma_mem;
2311 	struct mbx_common_get_cntl_attr *fw_cmd;
2312 
2313 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2314 		return EFAULT;
2315 
2316 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2317 		return EINVAL;
2318 
2319 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2320 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2321 		return EFAULT;
2322 
2323 	req_size = le32toh(req.u0.req.request_length);
2324 	if (req_size > 65536)
2325 		return EINVAL;
2326 
2327 	req_size += sizeof(struct mbx_hdr);
2328 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2329 	if (rc)
2330 		return ENOMEM;
2331 
2332 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2333 		rc = EFAULT;
2334 		goto dma_free;
2335 	}
2336 
2337 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2338 	if (rc) {
2339 		rc = EIO;
2340 		goto dma_free;
2341 	}
2342 
	/*
	 * Firmware fills in every attribute for this ioctl except the
	 * driver version, so patch that into the DMA response buffer
	 * before it is copied back out (the user pointer must not be
	 * dereferenced directly in the kernel).
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *)OCE_DMAPTR(&dma_mem, char);
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

	if (copyout(OCE_DMAPTR(&dma_mem, char), ioctl_ptr, req_size))
		rc = EFAULT;
2355 
2356 dma_free:
2357 	oce_dma_free(sc, &dma_mem);
2358 	return rc;
2359 
2360 }
2361 
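/*
 * Adaptive interrupt coalescing, run from the once-a-second timer:
 * estimate each EQ's packet rate from the RX/TX counters, derive an EQ
 * delay of roughly (pps / 15000) * 4 clamped to [min_eqd, max_eqd], and
 * push any changes to firmware.
 */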
2362 static void
2363 oce_eqd_set_periodic(POCE_SOFTC sc)
2364 {
2365 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2366 	struct oce_aic_obj *aic;
2367 	struct oce_eq *eqo;
2368 	uint64_t now = 0, delta;
2369 	int eqd, i, num = 0;
2370 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2371 	struct oce_wq *wq;
2372 	struct oce_rq *rq;
2373 
2374 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2375 
2376 	for (i = 0 ; i < sc->neqs; i++) {
2377 		eqo = sc->eq[i];
2378 		aic = &sc->aic_obj[i];
2379 		/* When setting the static eq delay from the user space */
2380 		if (!aic->enable) {
2381 			if (aic->ticks)
2382 				aic->ticks = 0;
2383 			eqd = aic->et_eqd;
2384 			goto modify_eqd;
2385 		}
2386 
2387 		if (i == 0) {
2388 			rq = sc->rq[0];
2389 			rxpkts = rq->rx_stats.rx_pkts;
2390 		} else
2391 			rxpkts = 0;
2392 		if (i + 1 < sc->nrqs) {
2393 			rq = sc->rq[i + 1];
2394 			rxpkts += rq->rx_stats.rx_pkts;
2395 		}
2396 		if (i < sc->nwqs) {
2397 			wq = sc->wq[i];
2398 			tx_reqs = wq->tx_stats.tx_reqs;
2399 		} else
2400 			tx_reqs = 0;
2401 		now = ticks;
2402 
2403 		if (!aic->ticks || now < aic->ticks ||
2404 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2405 			aic->prev_rxpkts = rxpkts;
2406 			aic->prev_txreqs = tx_reqs;
2407 			aic->ticks = now;
2408 			continue;
2409 		}
2410 
		delta = ticks_to_msecs(now - aic->ticks);
		/* Guard the divisions below against a zero-length interval. */
		if (delta == 0)
			continue;
2412 
2413 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2414 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2415 		eqd = (pps / 15000) << 2;
2416 		if (eqd < 8)
2417 			eqd = 0;
2418 
2419 		/* Make sure that the eq delay is in the known range */
2420 		eqd = min(eqd, aic->max_eqd);
2421 		eqd = max(eqd, aic->min_eqd);
2422 
2423 		aic->prev_rxpkts = rxpkts;
2424 		aic->prev_txreqs = tx_reqs;
2425 		aic->ticks = now;
2426 
2427 modify_eqd:
2428 		if (eqd != aic->cur_eqd) {
2429 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2430 			set_eqd[num].eq_id = eqo->eq_id;
2431 			aic->cur_eqd = eqd;
2432 			num++;
2433 		}
2434 	}
2435 
	/* Post the modified EQ delays to firmware, at most eight per command. */
	for (i = 0; i < num; i += 8) {
		if ((num - i) >= 8)
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
		else
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
	}
2443 
2444 }
2445 
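/*
 * Poll the adapter for unrecoverable errors.  Lancer (XE201) exposes
 * SLIPORT status/error registers; BE/Skyhawk expose UE status bitmaps
 * whose set bits are decoded via the ue_status_low/hi_desc tables.
 */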
2446 static void oce_detect_hw_error(POCE_SOFTC sc)
2447 {
2448 
2449 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2450 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2451 	uint32_t i;
2452 
2453 	if (sc->hw_error)
2454 		return;
2455 
2456 	if (IS_XE201(sc)) {
2457 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2458 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2459 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2460 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2461 		}
2462 	} else {
2463 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2464 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2465 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2466 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2467 
2468 		ue_low = (ue_low & ~ue_low_mask);
2469 		ue_high = (ue_high & ~ue_high_mask);
2470 	}
2471 
	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the hardware to stop completely on a real UE, which is
	 * why hw_error is deliberately not set for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}
2489 
2490 	if (ue_low) {
2491 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2492 			if (ue_low & 1)
2493 				device_printf(sc->dev, "UE: %s bit set\n",
2494 							ue_status_low_desc[i]);
2495 		}
2496 	}
2497 
2498 	if (ue_high) {
2499 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2500 			if (ue_high & 1)
2501 				device_printf(sc->dev, "UE: %s bit set\n",
2502 							ue_status_hi_desc[i]);
2503 		}
2504 	}
2505 
2506 }
2507 
2508 
2509 static void
2510 oce_local_timer(void *arg)
2511 {
2512 	POCE_SOFTC sc = arg;
2513 	int i = 0;
2514 
2515 	oce_detect_hw_error(sc);
2516 	oce_refresh_nic_stats(sc);
2517 	oce_refresh_queue_stats(sc);
2518 	oce_mac_addr_set(sc);
2519 
	/* TX watchdog */
2521 	for (i = 0; i < sc->nwqs; i++)
2522 		oce_tx_restart(sc, sc->wq[i]);
2523 
2524 	/* calculate and set the eq delay for optimal interrupt rate */
2525 	if (IS_BE(sc) || IS_SH(sc))
2526 		oce_eqd_set_periodic(sc);
2527 
2528 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2529 }
2530 
2531 static void
2532 oce_tx_compl_clean(POCE_SOFTC sc)
2533 {
2534 	struct oce_wq *wq;
2535 	int i = 0, timeo = 0, num_wqes = 0;
2536 	int pending_txqs = sc->nwqs;
2537 
2538 	/* Stop polling for compls when HW has been silent for 10ms or
2539 	 * hw_error or no outstanding completions expected
2540 	 */
2541 	do {
2542 		pending_txqs = sc->nwqs;
2543 
2544 		for_all_wq_queues(sc, wq, i) {
2545 			num_wqes = oce_wq_handler(wq);
2546 
2547 			if(num_wqes)
2548 				timeo = 0;
2549 
2550 			if(!wq->ring->num_used)
2551 				pending_txqs--;
2552 		}
2553 
2554 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2555 			break;
2556 
2557 		DELAY(1000);
2558 	} while (TRUE);
2559 
2560 	for_all_wq_queues(sc, wq, i) {
2561 		while(wq->ring->num_used) {
2562 			LOCK(&wq->tx_compl_lock);
2563 			oce_process_tx_completion(wq);
2564 			UNLOCK(&wq->tx_compl_lock);
2565 		}
2566 	}
2567 
2568 }
2569 
2570 /* NOTE : This should only be called holding
2571  *        DEVICE_LOCK.
2572  */
2573 static void
2574 oce_if_deactivate(POCE_SOFTC sc)
2575 {
2576 	int i;
2577 	struct oce_rq *rq;
2578 	struct oce_wq *wq;
2579 	struct oce_eq *eq;
2580 
2581 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2582 
2583 	oce_tx_compl_clean(sc);
2584 
2585 	/* Stop intrs and finish any bottom halves pending */
2586 	oce_hw_intr_disable(sc);
2587 
	/* Since taskqueue_drain takes the Giant lock, we should not hold
	   any other lock. So unlock the device lock and reacquire it
	   after taskqueue_drain completes.
	*/
2592 	UNLOCK(&sc->dev_lock);
2593 	for (i = 0; i < sc->intr_count; i++) {
2594 		if (sc->intrs[i].tq != NULL) {
2595 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2596 		}
2597 	}
2598 	LOCK(&sc->dev_lock);
2599 
2600 	/* Delete RX queue in card with flush param */
2601 	oce_stop_rx(sc);
2602 
2603 	/* Invalidate any pending cq and eq entries*/
2604 	for_all_evnt_queues(sc, eq, i)
2605 		oce_drain_eq(eq);
2606 	for_all_rq_queues(sc, rq, i)
2607 		oce_drain_rq_cq(rq);
2608 	for_all_wq_queues(sc, wq, i)
2609 		oce_drain_wq_cq(wq);
2610 
	/* But we still need to receive MCC async events.
	   So enable interrupts and also arm the first EQ.
	*/
2614 	oce_hw_intr_enable(sc);
2615 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2616 
2617 	DELAY(10);
2618 }
2619 
2620 
2621 static void
2622 oce_if_activate(POCE_SOFTC sc)
2623 {
2624 	struct oce_eq *eq;
2625 	struct oce_rq *rq;
2626 	struct oce_wq *wq;
2627 	int i, rc = 0;
2628 
2629 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2630 
2631 	oce_hw_intr_disable(sc);
2632 
2633 	oce_start_rx(sc);
2634 
2635 	for_all_rq_queues(sc, rq, i) {
2636 		rc = oce_start_rq(rq);
2637 		if (rc)
2638 			device_printf(sc->dev, "Unable to start RX\n");
2639 	}
2640 
2641 	for_all_wq_queues(sc, wq, i) {
2642 		rc = oce_start_wq(wq);
2643 		if (rc)
2644 			device_printf(sc->dev, "Unable to start TX\n");
2645 	}
2646 
2647 
2648 	for_all_evnt_queues(sc, eq, i)
2649 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2650 
2651 	oce_hw_intr_enable(sc);
2652 
2653 }
2654 
2655 static void
2656 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2657 {
2658 	/* Update Link status */
2659 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2660 	     ASYNC_EVENT_LINK_UP) {
2661 		sc->link_status = ASYNC_EVENT_LINK_UP;
2662 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2663 	} else {
2664 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2665 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2666 	}
2667 }
2668 
2669 
2670 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2671 					 struct oce_async_evt_grp5_os2bmc *evt)
2672 {
	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
	if (!evt->u.s.mgmt_enable)
		return;
	sc->flags |= OCE_FLAGS_OS2BMC;
2678 
2679 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2680 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2681 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2682 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2683 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2684 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2685 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2686 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2687 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2688 }
2689 
2690 
2691 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2692 {
2693 	struct oce_async_event_grp5_pvid_state *gcqe;
2694 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2695 
2696 	switch (cqe->u0.s.async_type) {
2697 	case ASYNC_EVENT_PVID_STATE:
2698 		/* GRP5 PVID */
2699 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2700 		if (gcqe->enabled)
2701 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2702 		else
2703 			sc->pvid = 0;
2704 		break;
2705 	case ASYNC_EVENT_OS2BMC:
2706 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2707 		oce_async_grp5_osbmc_process(sc, bmccqe);
2708 		break;
2709 	default:
2710 		break;
2711 	}
2712 }
2713 
2714 /* Handle the Completion Queue for the Mailbox/Async notifications */
2715 uint16_t
2716 oce_mq_handler(void *arg)
2717 {
2718 	struct oce_mq *mq = (struct oce_mq *)arg;
2719 	POCE_SOFTC sc = mq->parent;
2720 	struct oce_cq *cq = mq->cq;
2721 	int num_cqes = 0, evt_type = 0, optype = 0;
2722 	struct oce_mq_cqe *cqe;
2723 	struct oce_async_cqe_link_state *acqe;
2724 	struct oce_async_event_qnq *dbgcqe;
2725 
2726 
2727 	bus_dmamap_sync(cq->ring->dma.tag,
2728 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2729 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2730 
2731 	while (cqe->u0.dw[3]) {
2732 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2733 		if (cqe->u0.s.async_event) {
2734 			evt_type = cqe->u0.s.event_type;
2735 			optype = cqe->u0.s.async_type;
2736 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2737 				/* Link status evt */
2738 				acqe = (struct oce_async_cqe_link_state *)cqe;
2739 				process_link_state(sc, acqe);
2740 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2741 				oce_process_grp5_events(sc, cqe);
2742 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2743 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2744 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2745 				if(dbgcqe->valid)
2746 					sc->qnqid = dbgcqe->vlan_tag;
2747 				sc->qnq_debug_event = TRUE;
2748 			}
2749 		}
2750 		cqe->u0.dw[3] = 0;
2751 		RING_GET(cq->ring, 1);
2752 		bus_dmamap_sync(cq->ring->dma.tag,
2753 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2754 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2755 		num_cqes++;
2756 	}
2757 
2758 	if (num_cqes)
2759 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2760 
2761 	return 0;
2762 }
2763 
2764 
2765 static void
2766 setup_max_queues_want(POCE_SOFTC sc)
2767 {
	/* Check if it is a FLEX machine; if so, don't use RSS. */
2769 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2770 	    (sc->function_mode & FNM_UMC_MODE)    ||
2771 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2772 	    (!is_rss_enabled(sc))		  ||
2773 	    IS_BE2(sc)) {
2774 		sc->nrqs = 1;
2775 		sc->nwqs = 1;
2776 	} else {
2777 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2778 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2779 	}
2780 
2781 	if (IS_BE2(sc) && is_rss_enabled(sc))
2782 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2783 }
2784 
2785 
2786 static void
2787 update_queues_got(POCE_SOFTC sc)
2788 {
2789 	if (is_rss_enabled(sc)) {
2790 		sc->nrqs = sc->intr_count + 1;
2791 		sc->nwqs = sc->intr_count;
2792 	} else {
2793 		sc->nrqs = 1;
2794 		sc->nwqs = 1;
2795 	}
2796 
2797 	if (IS_BE2(sc))
2798 		sc->nwqs = 1;
2799 }
2800 
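/*
 * Helper for the BE3-A1 TX workaround below: returns TRUE when an IPv6
 * packet carries an extension-header chain (next header is neither TCP
 * nor UDP) whose first extension length byte reads 0xff.
 */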
2801 static int
2802 oce_check_ipv6_ext_hdr(struct mbuf *m)
2803 {
2804 	struct ether_header *eh = mtod(m, struct ether_header *);
2805 	caddr_t m_datatemp = m->m_data;
2806 
2807 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2808 		m->m_data += sizeof(struct ether_header);
2809 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2810 
		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
				(ip6->ip6_nxt != IPPROTO_UDP)) {
2813 			struct ip6_ext *ip6e = NULL;
2814 			m->m_data += sizeof(struct ip6_hdr);
2815 
			ip6e = mtod(m, struct ip6_ext *);
2817 			if(ip6e->ip6e_len == 0xff) {
2818 				m->m_data = m_datatemp;
2819 				return TRUE;
2820 			}
2821 		}
2822 		m->m_data = m_datatemp;
2823 	}
2824 	return FALSE;
2825 }
2826 
2827 static int
2828 is_be3_a1(POCE_SOFTC sc)
2829 {
2830 	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2831 		return TRUE;
2832 	}
2833 	return FALSE;
2834 }
2835 
2836 static struct mbuf *
2837 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2838 {
2839 	uint16_t vlan_tag = 0;
2840 
2841 	if(!M_WRITABLE(m))
2842 		return NULL;
2843 
2844 	/* Embed vlan tag in the packet if it is not part of it */
2845 	if(m->m_flags & M_VLANTAG) {
2846 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2847 		m->m_flags &= ~M_VLANTAG;
2848 	}
2849 
2850 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2851 	if(sc->pvid) {
2852 		if(!vlan_tag)
2853 			vlan_tag = sc->pvid;
2854 		if (complete)
2855 			*complete = FALSE;
2856 	}
2857 
2858 	if(vlan_tag) {
2859 		m = ether_vlanencap(m, vlan_tag);
2860 	}
2861 
2862 	if(sc->qnqid) {
2863 		m = ether_vlanencap(m, sc->qnqid);
2864 
2865 		if (complete)
2866 			*complete = FALSE;
2867 	}
2868 	return m;
2869 }
2870 
2871 static int
2872 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2873 {
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
			oce_check_ipv6_ext_hdr(m)) {
2876 		return TRUE;
2877 	}
2878 	return FALSE;
2879 }
2880 
2881 static void
2882 oce_get_config(POCE_SOFTC sc)
2883 {
2884 	int rc = 0;
2885 	uint32_t max_rss = 0;
2886 
2887 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2888 		max_rss = OCE_LEGACY_MODE_RSS;
2889 	else
2890 		max_rss = OCE_MAX_RSS;
2891 
2892 	if (!IS_BE(sc)) {
2893 		rc = oce_get_profile_config(sc, max_rss);
2894 		if (rc) {
2895 			sc->nwqs = OCE_MAX_WQ;
2896 			sc->nrssqs = max_rss;
2897 			sc->nrqs = sc->nrssqs + 1;
2898 		}
	} else { /* For BE3, don't rely on fw to determine the resources */
2901 		sc->nrssqs = max_rss;
2902 		sc->nrqs = sc->nrssqs + 1;
2903 		sc->nwqs = OCE_MAX_WQ;
2904 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2905 	}
2906 }
2907 
static void
oce_rdma_close(void)
{
	oce_rdma_if = NULL;
}
2915 
2916 static void
2917 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2918 {
2919   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2920 }
2921 
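/*
 * Registration entry point for the RoCE module: after validating the
 * handshake structures, record its callback table and announce every
 * attached oce device with its doorbell mapping, interrupt layout and
 * MAC address.
 */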
2922 int
2923 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2924 {
2925   POCE_SOFTC sc;
2926   struct oce_dev_info di;
2927   int i;
2928 
2929   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2930     return -EINVAL;
2931   }
2932 
2933   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2934       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2935     return -ENXIO;
2936   }
2937 
2938   rdma_info->close = oce_rdma_close;
2939   rdma_info->mbox_post = oce_mbox_post;
2940   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2941   rdma_info->get_mac_addr = oce_get_mac_addr;
2942 
2943   oce_rdma_if = rdma_if;
2944 
2945   sc = softc_head;
2946   while (sc != NULL) {
2947     if (oce_rdma_if->announce != NULL) {
2948       memset(&di, 0, sizeof(di));
2949       di.dev = sc->dev;
2950       di.softc = sc;
2951       di.ifp = sc->ifp;
2952       di.db_bhandle = sc->db_bhandle;
2953       di.db_btag = sc->db_btag;
2954       di.db_page_size = 4096;
2955       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2956         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2957       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2958         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2959       } else {
2960         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2961       }
2962       di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
2963       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2964         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2965         di.msix.start_vector = sc->intr_count;
2966         for (i=0; i<di.msix.num_vectors; i++) {
2967           di.msix.vector_list[i] = sc->intrs[i].vector;
2968         }
      }
2971       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2972       di.vendor_id = pci_get_vendor(sc->dev);
2973       di.dev_id = pci_get_device(sc->dev);
2974 
2975       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2976           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2977       }
2978 
2979       rdma_if->announce(&di);
    }
    sc = sc->next;
  }
2983 
2984   return 0;
2985 }
2986 
static void
oce_read_env_variables( POCE_SOFTC sc )
{
	char *value = NULL;
	int rc = 0;

	/* read if user wants to enable hwlro or swlro */
	/* (assumes FreeBSD 11+, where getenv() became kern_getenv()) */
	value = kern_getenv("oce_enable_hwlro");
	if (value && IS_SH(sc)) {
		sc->enable_hwlro = strtol(value, NULL, 10);
		if (sc->enable_hwlro) {
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if (rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			} else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		} else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	} else {
		sc->enable_hwlro = 0;
	}
	if (value != NULL)
		freeenv(value);

	/* read mbuf size */
	value = kern_getenv("oce_rq_buf_size");
	if (value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		switch (oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;

		default:
			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
			oce_rq_buf_size = 2048;
		}
	}
	if (value != NULL)
		freeenv(value);
}
3033