/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"
#include "oce_user.h"

#define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

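/*
 * Driver-internal summary of an RX completion. Both the regular RX path
 * and the hardware-LRO path normalize their (different) CQE layouts into
 * this one structure before handing it to oce_rx_mbuf_chain().
 */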
struct oce_common_cqe_info {
	uint8_t vtp:1;
	uint8_t l4_cksum_pass:1;
	uint8_t ip_cksum_pass:1;
	uint8_t ipv6_frame:1;
	uint8_t qnq:1;
	uint8_t rsvd:3;
	uint8_t num_frags;
	uint16_t pkt_size;
	uint16_t vtag;
};

/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_process_tx_completion(struct oce_wq *wq);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
static uint16_t oce_rq_handler_lro(void *arg);
static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
static void oce_read_env_variables(POCE_SOFTC sc);

/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};

/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;
uint32_t oce_rq_buf_size = 2048;

TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
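
/*
 * A minimal usage sketch: these tunables can be set from the boot loader,
 * e.g. in /boot/loader.conf (the values below are illustrative only):
 *
 *   hw.oce.max_rsp_handled="64"
 *   hw.oce.enable_rss="0"
 */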

/* Supported devices table */
static uint32_t supportedDevices[] = {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};

DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
    nitems(supportedDevices));
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);

POCE_SOFTC softc_head = NULL;
POCE_SOFTC softc_tail = NULL;

struct oce_rdma_if *oce_rdma_if = NULL;

/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}

static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	/* receive fragment size should be a multiple of 2K */
	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_read_env_variables(sc);

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	sc->next = NULL;
	if (softc_tail != NULL) {
		softc_tail->next = sc;
	} else {
		softc_head = sc;
	}
	softc_tail = sc;

	return 0;

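	/*
	 * Attach failed: unwind in the reverse order the resources were
	 * acquired above.
	 */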
stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;
}

static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);
	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;

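	/*
	 * Unlink this softc from the global driver list, fixing up
	 * softc_tail if we were the last element.
	 */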
	poce_sc_tmp = softc_head;
	ppoce_sc_tmp1 = &softc_head;
	while (poce_sc_tmp != NULL) {
		if (poce_sc_tmp == sc) {
			*ppoce_sc_tmp1 = sc->next;
			if (sc->next == NULL) {
				softc_tail = poce_sc_tmp2;
			}
			break;
		}
		poce_sc_tmp2 = poce_sc_tmp;
		ppoce_sc_tmp1 = &poce_sc_tmp->next;
		poce_sc_tmp = poce_sc_tmp->next;
	}

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}

static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}

static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	struct ifi2creq i2c;
	uint8_t offset = 0;
	int rc = 0;
	uint32_t u;

	switch (command) {
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

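		/*
		 * Sync the hardware promiscuous state with the interface
		 * flags. In the mask passed to the firmware, bit 0 appears
		 * to enable unicast promiscuous mode and bit 1 VLAN
		 * promiscuous mode (an assumption based on usage here).
		 */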
		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
				sc->promisc = TRUE;
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, 0))
				sc->promisc = FALSE;
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				u &= ~IFCAP_TSO;
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			if (sc->enable_hwlro) {
				if (ifp->if_capenable & IFCAP_LRO) {
					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
				} else {
					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
				}
			}
		}
#endif

		break;

	case SIOCGI2C:
		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (rc)
			break;

		if (i2c.dev_addr == PAGE_NUM_A0) {
			offset = i2c.offset;
		} else if (i2c.dev_addr == PAGE_NUM_A2) {
			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
		} else {
			rc = EINVAL;
			break;
		}

		if (i2c.len > sizeof(i2c.data) ||
		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
			rc = EINVAL;
			break;
		}

		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
		if (rc) {
			rc = -rc;
			break;
		}

		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);

		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;

	case SIOCGPRIVATE_0:
		rc = priv_check(curthread, PRIV_DRIVER);
		if (rc != 0)
			break;
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}

static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);
}

static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status)
		return ENXIO;

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;
}

static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}

/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;

	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;
	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC completions, but don't arm the CQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all CQs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}

static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;
	int tot_req_vectors, tot_vectors;

	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	tot_req_vectors = req_vectors;
	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
		if (req_vectors > 1) {
			tot_req_vectors += OCE_RDMA_VECTORS;
			sc->roce_intr_count = OCE_RDMA_VECTORS;
		}
	}

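	/*
	 * Partition the granted MSI-X vectors between the NIC and the RoCE
	 * block: if fewer vectors were granted than requested, RoCE gets up
	 * to half and the NIC queues keep the remainder.
	 */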
	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		tot_vectors = tot_req_vectors;
		rc = pci_alloc_msix(sc->dev, &tot_vectors);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else {
			if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
				if (tot_vectors < tot_req_vectors) {
					if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
						sc->roce_intr_count = (tot_vectors / 2);
					}
					sc->intr_count = tot_vectors - sc->roce_intr_count;
				}
			} else {
				sc->intr_count = tot_vectors;
			}
			sc->flags |= OCE_FLAGS_USING_MSIX;
		}
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on the interrupts we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}

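/*
 * Fast interrupt filter: it runs in interrupt context, so it only quiesces
 * the EQ (no re-arm) and defers all completion processing to the
 * per-vector taskqueue.
 */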
static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue(ii->tq, &ii->task);

	ii->eq->intr++;

	return FILTER_HANDLED;
}

static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii;
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	ii = &sc->intrs[vector];

	/* Set the resource id for the interrupt.
	 * MSI-X is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;
}

void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);
}

/******************************************************************************
 *			  Media callbacks functions			      *
 ******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;

	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
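	/*
	 * No distinct ifmedia subtype is reported for 20 and 25 Gbps below;
	 * both fall back to IFM_10G_SR while sc->speed records the actual
	 * link speed.
	 */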
	case 5: /* 20 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 20000;
		break;
	case 6: /* 25 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 25000;
		break;
	case 7: /* 40 Gbps */
		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
		sc->speed = 40000;
		break;
	default:
		sc->speed = 0;
		break;
	}

	return;
}

int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}

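/*
 * Decide whether a transmit packet should also be duplicated to the BMC
 * (OS-to-BMC pass-through). Only selected multicast/broadcast management
 * traffic (ARP, ICMPv6 RA/NA, DHCP, DHCPv6 RAS, NetBIOS) qualifies, gated
 * by per-filter enables in the adapter configuration.
 */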
static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
				struct mbuf *m, boolean_t *os2bmc,
				struct mbuf **m_new)
{
	struct ether_header *eh = NULL;

	eh = mtod(m, struct ether_header *);

	if (!is_os2bmc_enabled(sc) || *os2bmc) {
		*os2bmc = FALSE;
		goto done;
	}
	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
		goto done;

	if (is_mc_allowed_on_bmc(sc, eh) ||
	    is_bc_allowed_on_bmc(sc, eh) ||
	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
		*os2bmc = TRUE;
		goto done;
	}

	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
		uint8_t nexthdr = ip6->ip6_nxt;
		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
			switch (icmp6->icmp6_type) {
			case ND_ROUTER_ADVERT:
				*os2bmc = is_ipv6_ra_filt_enabled(sc);
				goto done;
			case ND_NEIGHBOR_ADVERT:
				*os2bmc = is_ipv6_na_filt_enabled(sc);
				goto done;
			default:
				break;
			}
		}
	}

	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
		struct ip *ip = mtod(m, struct ip *);
		int iphlen = ip->ip_hl << 2;
		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
		switch (uh->uh_dport) {
		case DHCP_CLIENT_PORT:
			*os2bmc = is_dhcp_client_filt_enabled(sc);
			goto done;
		case DHCP_SERVER_PORT:
			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			*os2bmc = is_nbios_filt_enabled(sc);
			goto done;
		case DHCPV6_RAS_PORT:
			*os2bmc = is_ipv6_ras_filt_enabled(sc);
			goto done;
		default:
			break;
		}
	}
done:
	if (*os2bmc) {
		*m_new = m_dup(m, M_NOWAIT);
		if (!*m_new) {
			*os2bmc = FALSE;
			return;
		}
		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
	}
}

/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp, *m_new = NULL;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	struct ether_header *eh = NULL;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;
	boolean_t os2bmc = FALSE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	/* Don't allow non-TSO packets longer than MTU */
	if (!is_tso_pkt(m)) {
		eh = mtod(m, struct ether_header *);
		if (m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
			goto free_ret;
	}

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}
	}

	/* The Lancer and SH ASICs have a bug wherein packets that are 32
	 * bytes or less may cause a transmit stall on that port. The
	 * workaround is to pad such packets to a 36-byte length.
	 */
	if (IS_SH(sc) || IS_XE201(sc)) {
		if (m->m_pkthdr.len <= 32) {
			char buf[36];
			bzero((void *)buf, 36);
			m_append(m, (36 - m->m_pkthdr.len), buf);
		}
	}

tx_start:
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];

retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy WQE required only for BE3 */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) % \
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.mgmt = os2bmc;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}

		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc) || !IS_SH(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		reg_value = (num_wqes << 16) | wq->wq_id;

		/* if os2bmc is not enabled or if the pkt is already tagged as
		   bmc, do nothing
		 */
		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);

		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if (m->m_flags & M_MCAST)
			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
		ETHER_BPF_MTAP(sc->ifp, m);

		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	if (os2bmc) {
		m = m_new;
		goto tx_start;
	}

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}

static void
oce_process_tx_completion(struct oce_wq *wq)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}

static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

	if (!drbr_empty(sc->ifp, wq->br))
		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
}

#if defined(INET6) || defined(INET)
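/*
 * Validate that this is a TCP packet and pull the Ethernet/IP/TCP headers
 * into one contiguous mbuf so the TSO header WQE can be built from it.
 */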
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	*mpp = m;
	return m;
}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
}

void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	while (true) {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
	}
}

/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	LOCK(&wq->tx_compl_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_process_tx_completion(wq);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	UNLOCK(&wq->tx_compl_lock);
	return num_cqes;
}

static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
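	/*
	 * Drain the buf_ring: peek at the head, try to post it to the WQ,
	 * and only advance past it on success. On failure put it back (or
	 * advance if oce_tx() already consumed it) and stall the queue.
	 */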
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			}
			break;
		}
		drbr_advance(ifp, br);
	}

	return 0;
}

/*****************************************************************************
 *			    Receive routines functions			     *
 *****************************************************************************/

static void
oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
{
	uint32_t *p;
	struct ether_header *eh = NULL;
	struct tcphdr *tcp_hdr = NULL;
	struct ip *ip4_hdr = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint32_t payload_len = 0;

	eh = mtod(m, struct ether_header *);
	/* correct IP header */
	if (!cqe2->ipv6_frame) {
		ip4_hdr = (struct ip *)((char *)eh + sizeof(struct ether_header));
		ip4_hdr->ip_ttl = cqe2->frame_lifespan;
		ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
		tcp_hdr = (struct tcphdr *)((char *)ip4_hdr + sizeof(struct ip));
	} else {
		ip6 = (struct ip6_hdr *)((char *)eh + sizeof(struct ether_header));
		ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
		payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
						- sizeof(struct ip6_hdr);
		ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
		tcp_hdr = (struct tcphdr *)((char *)ip6 + sizeof(struct ip6_hdr));
	}

	/* correct TCP header */
	tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
	if (cqe2->push) {
		tcp_hdr->th_flags |= TH_PUSH;
	}
	tcp_hdr->th_win = htons(cqe2->tcp_window);
	tcp_hdr->th_sum = 0xffff;
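	/*
	 * Refresh the TCP timestamp option values. The "+ 2" skips the
	 * option kind and length bytes, which assumes the timestamp option
	 * is the first option in the TCP header (a layout the coalescing
	 * hardware appears to guarantee).
	 */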
	if (cqe2->ts_opt) {
		p = (uint32_t *)((char *)tcp_hdr + sizeof(struct tcphdr) + 2);
		*p = cqe1->tcp_timestamp_val;
		*(p + 1) = cqe1->tcp_timestamp_ecr;
	}

	return;
}

static void
oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	uint32_t i = 0, frag_len = 0;
	uint32_t len = cqe_info->pkt_size;
	struct oce_packet_desc *pd;
	struct mbuf *tail = NULL;

	for (i = 0; i < cqe_info->num_frags; i++) {
		if (rq->ring->cidx == rq->ring->pidx) {
			device_printf(sc->dev,
				  "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
			return;
		}
		pd = &rq->pckts[rq->ring->cidx];

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		RING_GET(rq->ring, 1);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			if (rq->islro)
				tail->m_nextpkt = NULL;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			if (rq->islro)
				pd->mbuf->m_nextpkt = NULL;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe_info->l4_cksum_pass) {
					if (!cqe_info->ipv6_frame) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
							(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					} else { /* IPV6 frame */
						if (rq->islro) {
							pd->mbuf->m_pkthdr.csum_flags |=
							(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
						}
					}
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe_info->ip_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					       (CSUM_IP_CHECKED|CSUM_IP_VALID);
				}
			}
			*m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	return;
}

static void
oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct nic_hwlro_cqe_part1 *cqe1 = NULL;
	struct mbuf *m = NULL;
	struct oce_common_cqe_info cq_info;

	/* parse cqe */
	if (cqe2 == NULL) {
		cq_info.pkt_size = cqe->pkt_size;
		cq_info.vtag = cqe->vlan_tag;
		cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
		cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
		cq_info.ipv6_frame = cqe->ipv6_frame;
		cq_info.vtp = cqe->vtp;
		cq_info.qnq = cqe->qnq;
	} else {
		cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
		cq_info.pkt_size = cqe2->coalesced_size;
		cq_info.vtag = cqe2->vlan_tag;
		cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
		cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
		cq_info.ipv6_frame = cqe2->ipv6_frame;
		cq_info.vtp = cqe2->vtp;
		cq_info.qnq = cqe1->qnq;
	}

	cq_info.vtag = BSWAP_16(cq_info.vtag);

	cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
	if (cq_info.pkt_size % rq->cfg.frag_size)
		cq_info.num_frags++;

	oce_rx_mbuf_chain(rq, &cq_info, &m);

	if (m) {
		if (cqe2) {
			//assert(cqe2->valid != 0);

			//assert(cqe2->cqe_type != 2);
			oce_correct_header(m, cqe1, cqe2);
		}

		m->m_pkthdr.rcvif = sc->ifp;
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

		/* This determines if the VLAN tag is valid */
		if (cq_info.vtp) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cq_info.qnq) {
					m->m_pkthdr.ether_vtag = cq_info.vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) {
				/* In UMC mode the PVID is generally stripped
				   by hw, but in some cases the packet still
				   arrives with it. So if pvid == vlan,
				   neglect the VLAN.
				 */
				m->m_pkthdr.ether_vtag = cq_info.vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);

		(*sc->ifp->if_input) (sc->ifp, m);

		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cq_info.pkt_size;
		rq->rx_stats.rx_frags += cq_info.num_frags;
		rq->rx_stats.rx_ucast_pkts++;
	}
	return;
}

static void
oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int len;
	struct mbuf *m = NULL;
	struct oce_common_cqe_info cq_info;
	uint16_t vtag = 0;

	/* Is it a flush compl that has no data */
	if (!cqe->u0.s.num_fragments)
		goto exit;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
		goto exit;
	}

	if (!oce_cqe_portid_valid(sc, cqe)) {
		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
		goto exit;
	}

	/* Get vlan_tag value */
	if (IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
	cq_info.num_frags = cqe->u0.s.num_fragments;
	cq_info.pkt_size = cqe->u0.s.pkt_size;

	oce_rx_mbuf_chain(rq, &cq_info, &m);

	if (m) {
		m->m_pkthdr.rcvif = sc->ifp;
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

		/* This determines if the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
				/* In UMC mode the PVID is generally stripped
				   by hw, but in some cases the packet still
				   arrives with it. So if pvid == vlan,
				   neglect the VLAN.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {
			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}

void
oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
{
	uint32_t i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	for (i = 0; i < num_frags; i++) {
		if (rq->ring->cidx == rq->ring->pidx) {
			device_printf(sc->dev,
				"oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
			return;
		}
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		RING_GET(rq->ring, 1);
		rq->pending--;
	}
}

static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp = cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;
}

static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id = cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		; /* For BE3 legacy and Lancer this is dummy */

	return 1;
}

#if defined(INET6) || defined(INET)
void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	tcp_lro_flush_all(lro);
	rq->lro_pkts_queued = 0;

	return;
}

static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}

void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;
	uint32_t val = 0;
	uint32_t oce_max_rq_posts = 64;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		pd = &rq->pckts[rq->ring->pidx];
		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
		if (pd->mbuf == NULL) {
			device_printf(sc->dev, "mbuf allocation failed, size = %d\n", oce_rq_buf_size);
			break;
		}
		pd->mbuf->m_nextpkt = NULL;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;

		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
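	/*
	 * Ring the RQ doorbell. Posts are batched because a single doorbell
	 * write can only report a limited number of buffers; hardware-LRO
	 * RQs use a different doorbell register and format.
	 */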
	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
	if (added != 0) {
		for (i = added / oce_max_rq_posts; i > 0; i--) {
			rxdb_reg.bits.num_posted = oce_max_rq_posts;
			rxdb_reg.bits.qid = rq->rq_id;
			if (rq->islro) {
				/* Rebuild the doorbell value each time so stale
				 * count bits are not OR-ed into the next post.
				 */
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= oce_max_rq_posts << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
			added -= oce_max_rq_posts;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			if (rq->islro) {
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= added << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
		}
	}
1942 
1943 	return 0;
1944 }
1945 
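/* Post-completion housekeeping: re-arm the completion queue and top up the
 * receive ring, keeping one slot free (or refilling 64 buffers at a time
 * when hardware LRO is enabled).
 */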
1946 static void
1947 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1948 {
	if (num_cqes) {
		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
		if (!sc->enable_hwlro) {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
				oce_alloc_rx_bufs(rq,
				    (OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1);
		} else {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
				oce_alloc_rx_bufs(rq, 64);
		}
	}

	return;
1961 }
1962 
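/* RX completion handler used when hardware LRO is enabled.  The hardware
 * emits either a standalone "singleton" CQE (cqe_type 0) or a part1/part2
 * pair (cqe_type 1 and 2) describing one aggregated receive; part1 is
 * stashed in rq->cqe_firstpart until its matching part2 arrives.
 */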
1963 uint16_t
1964 oce_rq_handler_lro(void *arg)
1965 {
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int num_cqes = 0;

	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	while (cqe->valid) {
		if (cqe->cqe_type == 0) { /* singleton cqe */
			/* we should not get a singleton cqe after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
				goto exit_rq_handler_lro;
			}
			if (cqe->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			oce_rx_lro(rq, cqe, NULL);
			rq->rx_stats.rx_compl++;
			cqe->valid = 0;
			RING_GET(cq->ring, 1);
			num_cqes++;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		} else if (cqe->cqe_type == 0x1) { /* first part */
			/* we should not get cqe1 after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1\n");
				goto exit_rq_handler_lro;
			}
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
			RING_GET(cq->ring, 1);
		} else if (cqe->cqe_type == 0x2) { /* second part */
			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
			if (cqe2->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			/* we should not get cqe2 without cqe1 */
			if (rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1\n");
				goto exit_rq_handler_lro;
			}
			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

			rq->rx_stats.rx_compl++;
			rq->cqe_firstpart->valid = 0;
			cqe2->valid = 0;
			rq->cqe_firstpart = NULL;

			RING_GET(cq->ring, 1);
			num_cqes += 2;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		}

		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	}
	oce_check_rx_bufs(sc, num_cqes, rq);
2030 exit_rq_handler_lro:
2031 	UNLOCK(&rq->rx_lock);
2032 	return 0;
2033 }
2034 
2035 /* Handle the Completion Queue for receive */
2036 uint16_t
2037 oce_rq_handler(void *arg)
2038 {
2039 	struct epoch_tracker et;
2040 	struct oce_rq *rq = (struct oce_rq *)arg;
2041 	struct oce_cq *cq = rq->cq;
2042 	POCE_SOFTC sc = rq->parent;
2043 	struct oce_nic_rx_cqe *cqe;
2044 	int num_cqes = 0;
2045 
2046 	NET_EPOCH_ENTER(et);
2047 	if(rq->islro) {
2048 		oce_rq_handler_lro(arg);
2049 		NET_EPOCH_EXIT(et);
2050 		return 0;
2051 	}
2052 	LOCK(&rq->rx_lock);
2053 	bus_dmamap_sync(cq->ring->dma.tag,
2054 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2055 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2056 	while (cqe->u0.dw[2]) {
2057 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2058 
2059 		if (cqe->u0.s.error == 0) {
2060 			oce_rx(rq, cqe);
2061 		} else {
2062 			rq->rx_stats.rxcp_err++;
2063 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2064 			/* Post L3/L4 errors to stack.*/
2065 			oce_rx(rq, cqe);
2066 		}
2067 		rq->rx_stats.rx_compl++;
2068 		cqe->u0.dw[2] = 0;
2069 
2070 #if defined(INET6) || defined(INET)
2071 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2072 			oce_rx_flush_lro(rq);
2073 		}
2074 #endif
2075 
2076 		RING_GET(cq->ring, 1);
2077 		bus_dmamap_sync(cq->ring->dma.tag,
2078 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2079 		cqe =
2080 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2081 		num_cqes++;
2082 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2083 			break;
2084 	}
2085 
2086 #if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
2089 #endif
2090 
2091 	oce_check_rx_bufs(sc, num_cqes, rq);
2092 	UNLOCK(&rq->rx_lock);
2093 	NET_EPOCH_EXIT(et);
2094 	return 0;
2095 
2096 }
2097 
2098 /*****************************************************************************
 *		      Helper functions in this file			     *
2100  *****************************************************************************/
2101 
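/* Allocate the ifnet and configure media, callbacks, queue depth and the
 * checksum/TSO/LRO/VLAN capabilities before attaching to the ether layer.
 */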
2102 static int
2103 oce_attach_ifp(POCE_SOFTC sc)
2104 {
2105 
2106 	sc->ifp = if_alloc(IFT_ETHER);
2107 	if (!sc->ifp)
2108 		return ENOMEM;
2109 
2110 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2111 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2112 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2113 
2114 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_KNOWSEPOCH;
2115 	sc->ifp->if_ioctl = oce_ioctl;
2116 	sc->ifp->if_start = oce_start;
2117 	sc->ifp->if_init = oce_init;
2118 	sc->ifp->if_mtu = ETHERMTU;
2119 	sc->ifp->if_softc = sc;
2120 	sc->ifp->if_transmit = oce_multiq_start;
2121 	sc->ifp->if_qflush = oce_multiq_flush;
2122 
2123 	if_initname(sc->ifp,
2124 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2125 
2126 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2127 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2128 	IFQ_SET_READY(&sc->ifp->if_snd);
2129 
2130 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2131 	sc->ifp->if_hwassist |= CSUM_TSO;
2132 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2133 
2134 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2135 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2136 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2137 
2138 #if defined(INET6) || defined(INET)
2139 	sc->ifp->if_capabilities |= IFCAP_TSO;
2140 	sc->ifp->if_capabilities |= IFCAP_LRO;
2141 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2142 #endif
2143 
2144 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2145 	sc->ifp->if_baudrate = IF_Gbps(10);
2146 
2147 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2148 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2149 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2150 
2151 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2152 
2153 	return 0;
2154 }
2155 
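/* oce_add_vlan() and oce_del_vlan() are the VLAN config callbacks
 * registered with the stack: record the tag in the softc table and
 * reprogram the hardware filter.
 */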
2156 static void
2157 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2158 {
2159 	POCE_SOFTC sc = ifp->if_softc;
2160 
2161 	if (ifp->if_softc !=  arg)
2162 		return;
2163 	if ((vtag == 0) || (vtag > 4095))
2164 		return;
2165 
2166 	sc->vlan_tag[vtag] = 1;
2167 	sc->vlans_added++;
2168 	if (sc->vlans_added <= (sc->max_vlans + 1))
2169 		oce_vid_config(sc);
2170 }
2171 
2172 static void
2173 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2174 {
2175 	POCE_SOFTC sc = ifp->if_softc;
2176 
2177 	if (ifp->if_softc !=  arg)
2178 		return;
2179 	if ((vtag == 0) || (vtag > 4095))
2180 		return;
2181 
2182 	sc->vlan_tag[vtag] = 0;
2183 	sc->vlans_added--;
2184 	oce_vid_config(sc);
2185 }
2186 
2187 /*
2188  * A max of 64 vlans can be configured in BE. If the user configures
2189  * more, place the card in vlan promiscuous mode.
2190  */
2191 static int
2192 oce_vid_config(POCE_SOFTC sc)
2193 {
2194 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2195 	uint16_t ntags = 0, i;
2196 	int status = 0;
2197 
2198 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2199 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2200 		for (i = 0; i < MAX_VLANS; i++) {
2201 			if (sc->vlan_tag[i]) {
2202 				vtags[ntags].vtag = i;
2203 				ntags++;
2204 			}
2205 		}
2206 		if (ntags)
2207 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2208 						vtags, ntags, 1, 0);
2209 	} else
2210 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2211 					 	NULL, 0, 1, 1);
2212 	return status;
2213 }
2214 
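/* Sync the hardware MAC filter with the interface lladdr.  The new address
 * is added before the old pmac entry is deleted, apparently so the port is
 * never left without a valid unicast filter.
 */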
2215 static void
2216 oce_mac_addr_set(POCE_SOFTC sc)
2217 {
2218 	uint32_t old_pmac_id = sc->pmac_id;
2219 	int status = 0;
2220 
2221 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2222 			 sc->macaddr.size_of_struct);
2223 	if (!status)
2224 		return;
2225 
2226 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2227 					sc->if_id, &sc->pmac_id);
2228 	if (!status) {
2229 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2230 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2231 				 sc->macaddr.size_of_struct);
2232 	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
2235 
2236 }
2237 
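/* Private ioctl passthrough: validate the user-space cookie, copy the
 * mailbox request into DMA-able memory, execute it through the firmware
 * mailbox and copy the response back out.
 */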
2238 static int
2239 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2240 {
2241 	POCE_SOFTC sc = ifp->if_softc;
2242 	struct ifreq *ifr = (struct ifreq *)data;
2243 	int rc = ENXIO;
2244 	char cookie[32] = {0};
2245 	void *priv_data = ifr_data_get_ptr(ifr);
2246 	void *ioctl_ptr;
2247 	uint32_t req_size;
2248 	struct mbx_hdr req;
2249 	OCE_DMA_MEM dma_mem;
2250 	struct mbx_common_get_cntl_attr *fw_cmd;
2251 
2252 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2253 		return EFAULT;
2254 
2255 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2256 		return EINVAL;
2257 
2258 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2259 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2260 		return EFAULT;
2261 
2262 	req_size = le32toh(req.u0.req.request_length);
2263 	if (req_size > 65536)
2264 		return EINVAL;
2265 
2266 	req_size += sizeof(struct mbx_hdr);
2267 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2268 	if (rc)
2269 		return ENOMEM;
2270 
2271 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2272 		rc = EFAULT;
2273 		goto dma_free;
2274 	}
2275 
2276 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2277 	if (rc) {
2278 		rc = EIO;
2279 		goto dma_free;
2280 	}
2281 
2282 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
2283 		rc =  EFAULT;
2284 
	/*
	 * The firmware fills in all attributes for this ioctl except the
	 * driver version, so fill that in here.
	 */
2289 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2290 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2291 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2292 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2293 	}
2294 
2295 dma_free:
2296 	oce_dma_free(sc, &dma_mem);
2297 	return rc;
2298 
2299 }
2300 
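/* Adapt the EQ delay (interrupt coalescing) to the observed packet rate.
 * As a worked example of the formula below: at a combined 60000 pkts/sec,
 * eqd = (60000 / 15000) << 2 = 16, which is then clamped to
 * [min_eqd, max_eqd] and programmed as a delay multiplier of
 * 16 * 65 / 100 = 10.
 */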
2301 static void
2302 oce_eqd_set_periodic(POCE_SOFTC sc)
2303 {
2304 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2305 	struct oce_aic_obj *aic;
2306 	struct oce_eq *eqo;
2307 	uint64_t now = 0, delta;
2308 	int eqd, i, num = 0;
2309 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2310 	struct oce_wq *wq;
2311 	struct oce_rq *rq;
2312 
2313 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2314 
2315 	for (i = 0 ; i < sc->neqs; i++) {
2316 		eqo = sc->eq[i];
2317 		aic = &sc->aic_obj[i];
2318 		/* When setting the static eq delay from the user space */
2319 		if (!aic->enable) {
2320 			if (aic->ticks)
2321 				aic->ticks = 0;
2322 			eqd = aic->et_eqd;
2323 			goto modify_eqd;
2324 		}
2325 
2326 		if (i == 0) {
2327 			rq = sc->rq[0];
2328 			rxpkts = rq->rx_stats.rx_pkts;
2329 		} else
2330 			rxpkts = 0;
2331 		if (i + 1 < sc->nrqs) {
2332 			rq = sc->rq[i + 1];
2333 			rxpkts += rq->rx_stats.rx_pkts;
2334 		}
2335 		if (i < sc->nwqs) {
2336 			wq = sc->wq[i];
2337 			tx_reqs = wq->tx_stats.tx_reqs;
2338 		} else
2339 			tx_reqs = 0;
2340 		now = ticks;
2341 
2342 		if (!aic->ticks || now < aic->ticks ||
2343 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2344 			aic->prev_rxpkts = rxpkts;
2345 			aic->prev_txreqs = tx_reqs;
2346 			aic->ticks = now;
2347 			continue;
2348 		}
2349 
2350 		delta = ticks_to_msecs(now - aic->ticks);
		delta = ticks_to_msecs(now - aic->ticks);
		/* ticks may advance less than one millisecond between runs;
		 * skip this round rather than divide by zero.
		 */
		if (delta == 0)
			continue;

		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2355 		if (eqd < 8)
2356 			eqd = 0;
2357 
2358 		/* Make sure that the eq delay is in the known range */
2359 		eqd = min(eqd, aic->max_eqd);
2360 		eqd = max(eqd, aic->min_eqd);
2361 
2362 		aic->prev_rxpkts = rxpkts;
2363 		aic->prev_txreqs = tx_reqs;
2364 		aic->ticks = now;
2365 
2366 modify_eqd:
2367 		if (eqd != aic->cur_eqd) {
2368 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2369 			set_eqd[num].eq_id = eqo->eq_id;
2370 			aic->cur_eqd = eqd;
2371 			num++;
2372 		}
2373 	}
2374 
	/* Push the modified EQ delays to the firmware, at most 8 per mailbox. */
	for (i = 0; i < num; i += 8) {
		if ((num - i) >= 8)
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
		else
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], num - i);
	}
2382 
2383 }
2384 
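/* Poll for unrecoverable errors: the SLI port status registers on XE201
 * (Lancer), or the UE status CSRs on other adapters, and log whichever
 * error bits are set.
 */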
static void
oce_detect_hw_error(POCE_SOFTC sc)
{
2388 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2389 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2390 	uint32_t i;
2391 
2392 	if (sc->hw_error)
2393 		return;
2394 
2395 	if (IS_XE201(sc)) {
2396 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2397 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2398 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2399 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2400 		}
2401 	} else {
2402 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2403 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2404 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2405 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2406 
2407 		ue_low = (ue_low & ~ue_low_mask);
2408 		ue_high = (ue_high & ~ue_high_mask);
2409 	}
2410 
	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the hardware to stop completely on a genuine UE rather
	 * than flagging it here; hence hw_error is only set for SLI
	 * port errors, not for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}
2428 
2429 	if (ue_low) {
2430 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2431 			if (ue_low & 1)
2432 				device_printf(sc->dev, "UE: %s bit set\n",
2433 							ue_status_low_desc[i]);
2434 		}
2435 	}
2436 
2437 	if (ue_high) {
2438 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2439 			if (ue_high & 1)
2440 				device_printf(sc->dev, "UE: %s bit set\n",
2441 							ue_status_hi_desc[i]);
2442 		}
2443 	}
2444 
2445 }
2446 
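/* Once-a-second housekeeping callout: hardware error detection, statistics
 * refresh, MAC address sync, TX restart and adaptive EQ-delay tuning.
 */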
2447 static void
2448 oce_local_timer(void *arg)
2449 {
2450 	POCE_SOFTC sc = arg;
2451 	int i = 0;
2452 
2453 	oce_detect_hw_error(sc);
2454 	oce_refresh_nic_stats(sc);
2455 	oce_refresh_queue_stats(sc);
2456 	oce_mac_addr_set(sc);
2457 
	/* TX watchdog */
2459 	for (i = 0; i < sc->nwqs; i++)
2460 		oce_tx_restart(sc, sc->wq[i]);
2461 
2462 	/* calculate and set the eq delay for optimal interrupt rate */
2463 	if (IS_BE(sc) || IS_SH(sc))
2464 		oce_eqd_set_periodic(sc);
2465 
2466 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2467 }
2468 
2469 static void
2470 oce_tx_compl_clean(POCE_SOFTC sc)
2471 {
2472 	struct oce_wq *wq;
2473 	int i = 0, timeo = 0, num_wqes = 0;
2474 	int pending_txqs = sc->nwqs;
2475 
	/* Stop polling for completions when the hardware has been silent
	 * for 10ms, a hardware error is flagged, or no outstanding
	 * completions remain.
	 */
2479 	do {
2480 		pending_txqs = sc->nwqs;
2481 
2482 		for_all_wq_queues(sc, wq, i) {
2483 			num_wqes = oce_wq_handler(wq);
2484 
2485 			if(num_wqes)
2486 				timeo = 0;
2487 
2488 			if(!wq->ring->num_used)
2489 				pending_txqs--;
2490 		}
2491 
2492 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2493 			break;
2494 
2495 		DELAY(1000);
2496 	} while (TRUE);
2497 
2498 	for_all_wq_queues(sc, wq, i) {
2499 		while(wq->ring->num_used) {
2500 			LOCK(&wq->tx_compl_lock);
2501 			oce_process_tx_completion(wq);
2502 			UNLOCK(&wq->tx_compl_lock);
2503 		}
2504 	}
2505 
2506 }
2507 
/* NOTE: This must only be called while holding the device lock. */
2511 static void
2512 oce_if_deactivate(POCE_SOFTC sc)
2513 {
2514 	int i;
2515 	struct oce_rq *rq;
2516 	struct oce_wq *wq;
2517 	struct oce_eq *eq;
2518 
2519 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2520 
2521 	oce_tx_compl_clean(sc);
2522 
2523 	/* Stop intrs and finish any bottom halves pending */
2524 	oce_hw_intr_disable(sc);
2525 
	/* Since taskqueue_drain takes the Giant lock, we must not hold any
	 * other lock across it.  Drop the device lock and reacquire it
	 * after taskqueue_drain completes.
	 */
2530 	UNLOCK(&sc->dev_lock);
2531 	for (i = 0; i < sc->intr_count; i++) {
2532 		if (sc->intrs[i].tq != NULL) {
2533 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2534 		}
2535 	}
2536 	LOCK(&sc->dev_lock);
2537 
2538 	/* Delete RX queue in card with flush param */
2539 	oce_stop_rx(sc);
2540 
2541 	/* Invalidate any pending cq and eq entries*/
2542 	for_all_evnt_queues(sc, eq, i)
2543 		oce_drain_eq(eq);
2544 	for_all_rq_queues(sc, rq, i)
2545 		oce_drain_rq_cq(rq);
2546 	for_all_wq_queues(sc, wq, i)
2547 		oce_drain_wq_cq(wq);
2548 
	/* We still need to receive MCC async events, so re-enable
	 * interrupts and arm the first EQ.
	 */
2552 	oce_hw_intr_enable(sc);
2553 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2554 
2555 	DELAY(10);
2556 }
2557 
2558 static void
2559 oce_if_activate(POCE_SOFTC sc)
2560 {
2561 	struct oce_eq *eq;
2562 	struct oce_rq *rq;
2563 	struct oce_wq *wq;
2564 	int i, rc = 0;
2565 
2566 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2567 
2568 	oce_hw_intr_disable(sc);
2569 
2570 	oce_start_rx(sc);
2571 
2572 	for_all_rq_queues(sc, rq, i) {
2573 		rc = oce_start_rq(rq);
2574 		if (rc)
2575 			device_printf(sc->dev, "Unable to start RX\n");
2576 	}
2577 
2578 	for_all_wq_queues(sc, wq, i) {
2579 		rc = oce_start_wq(wq);
2580 		if (rc)
2581 			device_printf(sc->dev, "Unable to start TX\n");
2582 	}
2583 
2584 	for_all_evnt_queues(sc, eq, i)
2585 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2586 
2587 	oce_hw_intr_enable(sc);
2588 
2589 }
2590 
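/* Translate an async link-state CQE from the MQ into an ifnet link state
 * change.
 */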
2591 static void
2592 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2593 {
2594 	/* Update Link status */
2595 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2596 	     ASYNC_EVENT_LINK_UP) {
2597 		sc->link_status = ASYNC_EVENT_LINK_UP;
2598 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2599 	} else {
2600 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2601 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2602 	}
2603 }
2604 
static void
oce_async_grp5_osbmc_process(POCE_SOFTC sc,
			     struct oce_async_evt_grp5_os2bmc *evt)
{
	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
	if (!evt->u.s.mgmt_enable)
		return;
	sc->flags |= OCE_FLAGS_OS2BMC;
2613 
2614 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2615 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2616 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2617 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2618 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2619 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2620 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2621 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2622 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2623 }
2624 
static void
oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2626 {
2627 	struct oce_async_event_grp5_pvid_state *gcqe;
2628 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2629 
2630 	switch (cqe->u0.s.async_type) {
2631 	case ASYNC_EVENT_PVID_STATE:
2632 		/* GRP5 PVID */
2633 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2634 		if (gcqe->enabled)
2635 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2636 		else
2637 			sc->pvid = 0;
2638 		break;
2639 	case ASYNC_EVENT_OS2BMC:
2640 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2641 		oce_async_grp5_osbmc_process(sc, bmccqe);
2642 		break;
2643 	default:
2644 		break;
2645 	}
2646 }
2647 
2648 /* Handle the Completion Queue for the Mailbox/Async notifications */
2649 uint16_t
2650 oce_mq_handler(void *arg)
2651 {
2652 	struct oce_mq *mq = (struct oce_mq *)arg;
2653 	POCE_SOFTC sc = mq->parent;
2654 	struct oce_cq *cq = mq->cq;
2655 	int num_cqes = 0, evt_type = 0, optype = 0;
2656 	struct oce_mq_cqe *cqe;
2657 	struct oce_async_cqe_link_state *acqe;
2658 	struct oce_async_event_qnq *dbgcqe;
2659 
2660 	bus_dmamap_sync(cq->ring->dma.tag,
2661 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2662 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2663 
2664 	while (cqe->u0.dw[3]) {
2665 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2666 		if (cqe->u0.s.async_event) {
2667 			evt_type = cqe->u0.s.event_type;
2668 			optype = cqe->u0.s.async_type;
2669 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2670 				/* Link status evt */
2671 				acqe = (struct oce_async_cqe_link_state *)cqe;
2672 				process_link_state(sc, acqe);
2673 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2674 				oce_process_grp5_events(sc, cqe);
2675 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2676 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2677 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2678 				if(dbgcqe->valid)
2679 					sc->qnqid = dbgcqe->vlan_tag;
2680 				sc->qnq_debug_event = TRUE;
2681 			}
2682 		}
2683 		cqe->u0.dw[3] = 0;
2684 		RING_GET(cq->ring, 1);
2685 		bus_dmamap_sync(cq->ring->dma.tag,
2686 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2687 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2688 		num_cqes++;
2689 	}
2690 
2691 	if (num_cqes)
2692 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2693 
2694 	return 0;
2695 }
2696 
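/* Pick the number of RX/TX queues to request: a single pair whenever RSS
 * cannot be used, otherwise roughly one queue per CPU up to the RSS limit
 * plus the default RQ.  BE2 is limited to a single WQ but may still use
 * multiple RSS RQs.
 */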
2697 static void
2698 setup_max_queues_want(POCE_SOFTC sc)
2699 {
	/* Check if this is a FLEX machine; if so, don't use RSS */
2701 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2702 	    (sc->function_mode & FNM_UMC_MODE)    ||
2703 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2704 	    (!is_rss_enabled(sc))		  ||
2705 	    IS_BE2(sc)) {
2706 		sc->nrqs = 1;
2707 		sc->nwqs = 1;
2708 	} else {
2709 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2710 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2711 	}
2712 
2713 	if (IS_BE2(sc) && is_rss_enabled(sc))
2714 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2715 }
2716 
2717 static void
2718 update_queues_got(POCE_SOFTC sc)
2719 {
2720 	if (is_rss_enabled(sc)) {
2721 		sc->nrqs = sc->intr_count + 1;
2722 		sc->nwqs = sc->intr_count;
2723 	} else {
2724 		sc->nrqs = 1;
2725 		sc->nwqs = 1;
2726 	}
2727 
2728 	if (IS_BE2(sc))
2729 		sc->nwqs = 1;
2730 }
2731 
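/* Return TRUE for an IPv6 frame whose next header is not TCP/UDP and whose
 * first extension header carries a length field of 0xff; used by the
 * BE3 A1 ASIC-stall check below.
 */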
2732 static int
2733 oce_check_ipv6_ext_hdr(struct mbuf *m)
2734 {
2735 	struct ether_header *eh = mtod(m, struct ether_header *);
2736 	caddr_t m_datatemp = m->m_data;
2737 
2738 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2739 		m->m_data += sizeof(struct ether_header);
2740 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2741 
		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
2753 		m->m_data = m_datatemp;
2754 	}
2755 	return FALSE;
2756 }
2757 
2758 static int
2759 is_be3_a1(POCE_SOFTC sc)
2760 {
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2))
		return TRUE;
	return FALSE;
2765 }
2766 
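/* Insert the VLAN (and, for QnQ, the outer) tag directly into the frame in
 * software.  May return a different mbuf chain than the one passed in, or
 * NULL on failure; *complete is cleared when the tag has been (or must be)
 * inserted in software rather than by the hardware.
 */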
2767 static struct mbuf *
2768 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2769 {
2770 	uint16_t vlan_tag = 0;
2771 
	if (!M_WRITABLE(m))
		return NULL;

	/* Pull the VLAN tag out of the mbuf metadata so it can be
	 * embedded in the frame itself.
	 */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* In UMC mode, fall back to the PVID when the packet carries no
	 * VLAN tag of its own.
	 */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		if (complete)
			*complete = FALSE;
	}

	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
		if (m == NULL)
			return NULL;
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		if (m == NULL)
			return NULL;
		if (complete)
			*complete = FALSE;
	}
	return m;
2800 }
2801 
2802 static int
2803 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2804 {
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
	    oce_check_ipv6_ext_hdr(m)) {
2807 		return TRUE;
2808 	}
2809 	return FALSE;
2810 }
2811 
2812 static void
2813 oce_get_config(POCE_SOFTC sc)
2814 {
2815 	int rc = 0;
2816 	uint32_t max_rss = 0;
2817 
2818 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2819 		max_rss = OCE_LEGACY_MODE_RSS;
2820 	else
2821 		max_rss = OCE_MAX_RSS;
2822 
2823 	if (!IS_BE(sc)) {
2824 		rc = oce_get_profile_config(sc, max_rss);
2825 		if (rc) {
2826 			sc->nwqs = OCE_MAX_WQ;
2827 			sc->nrssqs = max_rss;
2828 			sc->nrqs = sc->nrssqs + 1;
2829 		}
	} else { /* For BE3 don't rely on fw for determining the resources */
2832 		sc->nrssqs = max_rss;
2833 		sc->nrqs = sc->nrssqs + 1;
2834 		sc->nwqs = OCE_MAX_WQ;
2835 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2836 	}
2837 }
2838 
static void
oce_rdma_close(void)
{
  oce_rdma_if = NULL;
}
2846 
2847 static void
2848 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2849 {
2850   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2851 }
2852 
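/* Called by the RoCE driver to register its callback table; validates the
 * structure sizes, records the interface and announces every attached oce
 * device to the RDMA driver.
 */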
2853 int
2854 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2855 {
2856   POCE_SOFTC sc;
2857   struct oce_dev_info di;
2858   int i;
2859 
2860   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2861     return -EINVAL;
2862   }
2863 
2864   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2865       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2866     return -ENXIO;
2867   }
2868 
2869   rdma_info->close = oce_rdma_close;
2870   rdma_info->mbox_post = oce_mbox_post;
2871   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2872   rdma_info->get_mac_addr = oce_get_mac_addr;
2873 
2874   oce_rdma_if = rdma_if;
2875 
  sc = softc_head;
  while (sc != NULL) {
    if (oce_rdma_if->announce != NULL) {
      memset(&di, 0, sizeof(di));
      di.dev = sc->dev;
      di.softc = sc;
      di.ifp = sc->ifp;
      di.db_bhandle = sc->db_bhandle;
      di.db_btag = sc->db_btag;
      di.db_page_size = 4096;
      if (sc->flags & OCE_FLAGS_USING_MSIX) {
        di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
      } else if (sc->flags & OCE_FLAGS_USING_MSI) {
        di.intr_mode = OCE_INTERRUPT_MODE_MSI;
      } else {
        di.intr_mode = OCE_INTERRUPT_MODE_INTX;
      }
      di.dev_family = OCE_GEN2_FAMILY; /* FIXME: must detect Skyhawk */
      if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
        di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
        di.msix.start_vector = sc->intr_count;
        for (i = 0; i < di.msix.num_vectors; i++) {
          di.msix.vector_list[i] = sc->intrs[i].vector;
        }
      }
      memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
      di.vendor_id = pci_get_vendor(sc->dev);
      di.dev_id = pci_get_device(sc->dev);

      if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
        di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;
      }

      rdma_if->announce(&di);
    }
    /* Advance unconditionally so a NULL announce hook cannot leave us
     * looping on the same softc forever.
     */
    sc = sc->next;
  }
2914 
2915   return 0;
2916 }
2917 
static void
oce_read_env_variables(POCE_SOFTC sc)
{
	char *value = NULL;
	int rc = 0;

	/* Check whether the user wants hardware or software LRO.  The
	 * kernel environment lookup assumes kern_getenv(9) is the intended
	 * replacement for the previously disabled getenv() call.
	 */
	value = kern_getenv("oce_enable_hwlro");
	if (value && IS_SH(sc)) {
		sc->enable_hwlro = strtol(value, NULL, 10);
		if (sc->enable_hwlro) {
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if (rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			} else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		} else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	} else {
		sc->enable_hwlro = 0;
	}
	if (value)
		freeenv(value);

	/* Read the receive mbuf size; only these cluster sizes are valid. */
	value = kern_getenv("oce_rq_buf_size");
	if (value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		switch (oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;

		default:
			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
			oce_rq_buf_size = 2048;
		}
	}
	if (value)
		freeenv(value);

	return;
}
2964