xref: /freebsd/sys/dev/oce/oce_if.c (revision ae7e8a02e6e93455e026036132c4d053b2c12ad9)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
/* True when the mbuf is flagged for TSO; argument parenthesized for macro hygiene. */
#define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
50 
/* UE Status Low CSR: hardware block name for each unrecoverable-error bit,
 * indexed by bit position (bit 0 first). */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
86 
/* UE Status High CSR: hardware block name for each unrecoverable-error bit,
 * indexed by bit position (bit 0 first); trailing entries are undocumented. */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
122 
/* Fields decoded from an RX completion entry, shared between the regular
 * and LRO receive paths (see oce_rx()/oce_rx_lro()). */
struct oce_common_cqe_info{
        uint8_t vtp:1;		/* presumably "VLAN tag present" -- verify against CQE layout */
        uint8_t l4_cksum_pass:1;	/* HW L4 checksum validation passed */
        uint8_t ip_cksum_pass:1;	/* HW IP header checksum validation passed */
        uint8_t ipv6_frame:1;	/* frame is IPv6 */
        uint8_t qnq:1;		/* presumably QinQ (stacked VLAN) -- verify against CQE layout */
        uint8_t rsvd:3;
        uint8_t num_frags;	/* number of RX buffer fragments in this packet */
        uint16_t pkt_size;	/* total packet length in bytes */
        uint16_t vtag;		/* extracted VLAN tag */
};
134 
135 /* Driver entry points prototypes */
136 static int  oce_probe(device_t dev);
137 static int  oce_attach(device_t dev);
138 static int  oce_detach(device_t dev);
139 static int  oce_shutdown(device_t dev);
140 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
141 static void oce_init(void *xsc);
142 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
143 static void oce_multiq_flush(struct ifnet *ifp);
144 
145 /* Driver interrupt routines protypes */
146 static void oce_intr(void *arg, int pending);
147 static int  oce_setup_intr(POCE_SOFTC sc);
148 static int  oce_fast_isr(void *arg);
149 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
150 			  void (*isr) (void *arg, int pending));
151 
152 /* Media callbacks prototypes */
153 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
154 static int  oce_media_change(struct ifnet *ifp);
155 
156 /* Transmit routines prototypes */
157 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
158 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
159 static void oce_process_tx_completion(struct oce_wq *wq);
160 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
161 				 struct oce_wq *wq);
162 
163 /* Receive routines prototypes */
164 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
167 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
168 static uint16_t oce_rq_handler_lro(void *arg);
169 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
172 
173 /* Helper function prototypes in this file */
174 static int  oce_attach_ifp(POCE_SOFTC sc);
175 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
176 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static int  oce_vid_config(POCE_SOFTC sc);
178 static void oce_mac_addr_set(POCE_SOFTC sc);
179 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
180 static void oce_local_timer(void *arg);
181 static void oce_if_deactivate(POCE_SOFTC sc);
182 static void oce_if_activate(POCE_SOFTC sc);
183 static void setup_max_queues_want(POCE_SOFTC sc);
184 static void update_queues_got(POCE_SOFTC sc);
185 static void process_link_state(POCE_SOFTC sc,
186 		 struct oce_async_cqe_link_state *acqe);
187 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
188 static void oce_get_config(POCE_SOFTC sc);
189 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
190 static void oce_read_env_variables(POCE_SOFTC sc);
191 
192 /* IP specific */
193 #if defined(INET6) || defined(INET)
194 static int  oce_init_lro(POCE_SOFTC sc);
195 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
196 #endif
197 
/* newbus method dispatch table for the oce driver. */
static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

/* Driver description: name, method table, per-device softc size. */
static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;
213 
/* global vars */
/* Driver revision string, also used to build the probe description. */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;	/* max completions per pass */
uint32_t oce_enable_rss = OCE_MODCAP_RSS;		/* RSS enable toggle */
uint32_t oce_rq_buf_size = 2048;	/* RX buffer size; rounded to a 2K multiple in attach */

/* Loader tunables overriding the defaults above. */
TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);

/* Supported devices table */
/* Each entry packs (PCI vendor << 16) | PCI device. */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};

DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
    nitems(supportedDevices));
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);

/* Singly-linked list of all attached softcs; maintained by attach/detach.
 * NOTE(review): presumably consumed by the RDMA glue below -- verify. */
POCE_SOFTC softc_head = NULL;
POCE_SOFTC softc_tail = NULL;

struct oce_rdma_if *oce_rdma_if = NULL;
246 
247 /*****************************************************************************
248  *			Driver entry points functions                        *
249  *****************************************************************************/
250 
251 static int
252 oce_probe(device_t dev)
253 {
254 	uint16_t vendor = 0;
255 	uint16_t device = 0;
256 	int i = 0;
257 	char str[256] = {0};
258 	POCE_SOFTC sc;
259 
260 	sc = device_get_softc(dev);
261 	bzero(sc, sizeof(OCE_SOFTC));
262 	sc->dev = dev;
263 
264 	vendor = pci_get_vendor(dev);
265 	device = pci_get_device(dev);
266 
267 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
268 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
269 			if (device == (supportedDevices[i] & 0xffff)) {
270 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
271 					component_revision);
272 				device_set_desc_copy(dev, str);
273 
274 				switch (device) {
275 				case PCI_PRODUCT_BE2:
276 					sc->flags |= OCE_FLAGS_BE2;
277 					break;
278 				case PCI_PRODUCT_BE3:
279 					sc->flags |= OCE_FLAGS_BE3;
280 					break;
281 				case PCI_PRODUCT_XE201:
282 				case PCI_PRODUCT_XE201_VF:
283 					sc->flags |= OCE_FLAGS_XE201;
284 					break;
285 				case PCI_PRODUCT_SH:
286 					sc->flags |= OCE_FLAGS_SH;
287 					break;
288 				default:
289 					return ENXIO;
290 				}
291 				return BUS_PROBE_DEFAULT;
292 			}
293 		}
294 	}
295 
296 	return ENXIO;
297 }
298 
/*
 * Device attach: allocate PCI resources, initialise the hardware, create
 * queues and interrupts, attach the network interface, and start the
 * periodic timer.  On any failure the goto ladder below unwinds exactly
 * the resources acquired so far, in reverse order of acquisition.
 */
static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	/* receive fragment size should be multiple of 2K */
	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_read_env_variables(sc);

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	/* VLAN config/unconfig notifications keep the HW VLAN table in sync. */
	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	/* Periodic housekeeping timer, re-armed every 2 seconds. */
	callout_init(&sc->timer, CALLOUT_MPSAFE);
	/* NOTE(review): callout_reset() reports whether a pending callout was
	 * cancelled, not an error; rc should always be 0 here -- confirm. */
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	/* Append this softc to the global attached-device list. */
	sc->next =NULL;
	if (softc_tail != NULL) {
	  softc_tail->next = sc;
	} else {
	  softc_head = sc;
	}
	softc_tail = sc;

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}
409 
/*
 * Device detach: unlink this softc from the global list, quiesce the
 * interface, remove event handlers and release hardware resources.
 */
static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);
	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;

	/* Unlink sc from the softc_head list.  poce_sc_tmp2 trails one node
	 * behind the cursor so softc_tail can be fixed up when the removed
	 * node is the current tail. */
        poce_sc_tmp = softc_head;
        ppoce_sc_tmp1 = &softc_head;
        while (poce_sc_tmp != NULL) {
          if (poce_sc_tmp == sc) {
            *ppoce_sc_tmp1 = sc->next;
            if (sc->next == NULL) {
              softc_tail = poce_sc_tmp2;
            }
            break;
          }
          poce_sc_tmp2 = poce_sc_tmp;
          ppoce_sc_tmp1 = &poce_sc_tmp->next;
          poce_sc_tmp = poce_sc_tmp->next;
        }

	/* Stop TX/RX before tearing anything down. */
	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	/* Wait for the periodic timer to finish if it is running. */
	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}
452 
453 static int
454 oce_shutdown(device_t dev)
455 {
456 	int rc;
457 
458 	rc = oce_detach(dev);
459 
460 	return rc;
461 }
462 
/*
 * ifnet ioctl handler: media queries, MTU, interface flags, multicast
 * filter updates, capability toggles, SFP i2c reads and the private
 * pass-through command.  Anything unrecognized falls through to
 * ether_ioctl().  Returns 0 or an errno.
 */
static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	struct ifi2creq i2c;
	uint8_t	offset = 0;
	int rc = 0;
	uint32_t u;

	switch (command) {
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		/* Accept any MTU up to the hardware maximum. */
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Bring the interface up if it is not running yet. */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		/* Sync HW promiscuous mode with the IFF_PROMISC flag.
		 * NOTE(review): bits (1 | (1 << 1)) presumably select
		 * unicast + VLAN promiscuous -- verify against
		 * oce_rxf_set_promiscuous(). */
		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
				sc->promisc = TRUE;
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, 0))
				sc->promisc = FALSE;
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		/* u holds the capability bits that changed. */
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO requires TX checksum offload. */
			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				u &= ~IFCAP_TSO;
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					/* Refuse TSO without txcsum. */
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			/* HW LRO needs the firmware told about the change. */
			if(sc->enable_hwlro) {
				if(ifp->if_capenable & IFCAP_LRO) {
					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
				}else {
					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
				}
			}
		}
#endif

		break;

	case SIOCGI2C:
		/* Read SFP module data (pages A0/A2) over i2c. */
		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (rc)
			break;

		/* Page A2 data is stored after the A0 page in the dump. */
		if (i2c.dev_addr == PAGE_NUM_A0) {
			offset = i2c.offset;
		} else if (i2c.dev_addr == PAGE_NUM_A2) {
			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
		} else {
			rc = EINVAL;
			break;
		}

		/* Bounds-check the user-supplied length/offset. */
		if (i2c.len > sizeof(i2c.data) ||
		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
			rc = EINVAL;
			break;
		}

		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
		if (rc) {
			rc = -rc;
			break;
		}

		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);

		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;

	case SIOCGPRIVATE_0:
		/* Privileged vendor pass-through interface. */
		rc = priv_check(curthread, PRIV_DRIVER);
		if (rc != 0)
			break;
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}
625 
626 static void
627 oce_init(void *arg)
628 {
629 	POCE_SOFTC sc = arg;
630 
631 	LOCK(&sc->dev_lock);
632 
633 	if (sc->ifp->if_flags & IFF_UP) {
634 		oce_if_deactivate(sc);
635 		oce_if_activate(sc);
636 	}
637 
638 	UNLOCK(&sc->dev_lock);
639 
640 }
641 
642 static int
643 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
644 {
645 	POCE_SOFTC sc = ifp->if_softc;
646 	struct oce_wq *wq = NULL;
647 	int queue_index = 0;
648 	int status = 0;
649 
650 	if (!sc->link_status)
651 		return ENXIO;
652 
653 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
654 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
655 
656 	wq = sc->wq[queue_index];
657 
658 	LOCK(&wq->tx_lock);
659 	status = oce_multiq_transmit(ifp, m, wq);
660 	UNLOCK(&wq->tx_lock);
661 
662 	return status;
663 
664 }
665 
666 static void
667 oce_multiq_flush(struct ifnet *ifp)
668 {
669 	POCE_SOFTC sc = ifp->if_softc;
670 	struct mbuf     *m;
671 	int i = 0;
672 
673 	for (i = 0; i < sc->nwqs; i++) {
674 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
675 			m_freem(m);
676 	}
677 	if_qflush(ifp);
678 }
679 
680 /*****************************************************************************
681  *                   Driver interrupt routines functions                     *
682  *****************************************************************************/
683 
/*
 * Taskqueue-deferred interrupt handler for one EQ: consume all posted
 * event-queue entries, invoke the handler of every CQ attached to the
 * EQ, then re-arm the CQs and finally the EQ itself.
 */
static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;

	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	/* Walk the EQ ring until an un-posted (evnt == 0) entry is hit,
	 * zeroing each consumed entry so it can be reused. */
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

 	/* Clear EQ entries, but dont arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC. But dont arm CQ*/
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}
732 
/*
 * Allocate interrupt vectors: try MSI-X sized for RSS (and optionally
 * RDMA/RoCE companion vectors), falling back to a single legacy INTx
 * vector, then attach oce_intr() to every vector obtained.
 */
static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;
	int tot_req_vectors, tot_vectors;

	/* With RSS: one vector per RQ (minus the default RQ) or per WQ,
	 * whichever is larger; otherwise a single vector. */
	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	/* Reserve extra vectors for the RDMA (RoCE) function if present. */
	tot_req_vectors = req_vectors;
	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
	  if (req_vectors > 1) {
	    tot_req_vectors += OCE_RDMA_VECTORS;
	    sc->roce_intr_count = OCE_RDMA_VECTORS;
	  }
	}

        if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
                tot_vectors = tot_req_vectors;
		rc = pci_alloc_msix(sc->dev, &tot_vectors);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else {
		  /* Fewer vectors granted than requested: split them
		   * between the NIC and RoCE shares. */
		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
		    if (tot_vectors < tot_req_vectors) {
		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
			sc->roce_intr_count = (tot_vectors / 2);
		      }
		      sc->intr_count = tot_vectors - sc->roce_intr_count;
		    }
		  } else {
		    sc->intr_count = tot_vectors;
		  }
    		  sc->flags |= OCE_FLAGS_USING_MSIX;
		}
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	/* Release everything allocated so far (safe on partial setup). */
	oce_intr_free(sc);
	return rc;
}
800 
801 static int
802 oce_fast_isr(void *arg)
803 {
804 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
805 	POCE_SOFTC sc = ii->sc;
806 
807 	if (ii->eq == NULL)
808 		return FILTER_STRAY;
809 
810 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
811 
812 	taskqueue_enqueue(ii->tq, &ii->task);
813 
814  	ii->eq->intr++;
815 
816 	return FILTER_HANDLED;
817 }
818 
819 static int
820 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
821 {
822 	POCE_INTR_INFO ii;
823 	int rc = 0, rr;
824 
825 	if (vector >= OCE_MAX_EQ)
826 		return (EINVAL);
827 
828 	ii = &sc->intrs[vector];
829 
830 	/* Set the resource id for the interrupt.
831 	 * MSIx is vector + 1 for the resource id,
832 	 * INTx is 0 for the resource id.
833 	 */
834 	if (sc->flags & OCE_FLAGS_USING_MSIX)
835 		rr = vector + 1;
836 	else
837 		rr = 0;
838 	ii->intr_res = bus_alloc_resource_any(sc->dev,
839 					      SYS_RES_IRQ,
840 					      &rr, RF_ACTIVE|RF_SHAREABLE);
841 	ii->irq_rr = rr;
842 	if (ii->intr_res == NULL) {
843 		device_printf(sc->dev,
844 			  "Could not allocate interrupt\n");
845 		rc = ENXIO;
846 		return rc;
847 	}
848 
849 	TASK_INIT(&ii->task, 0, isr, ii);
850 	ii->vector = vector;
851 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
852 	ii->tq = taskqueue_create_fast(ii->task_name,
853 			M_NOWAIT,
854 			taskqueue_thread_enqueue,
855 			&ii->tq);
856 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
857 			device_get_nameunit(sc->dev));
858 
859 	ii->sc = sc;
860 	rc = bus_setup_intr(sc->dev,
861 			ii->intr_res,
862 			INTR_TYPE_NET,
863 			oce_fast_isr, NULL, ii, &ii->tag);
864 	return rc;
865 
866 }
867 
868 void
869 oce_intr_free(POCE_SOFTC sc)
870 {
871 	int i = 0;
872 
873 	for (i = 0; i < sc->intr_count; i++) {
874 
875 		if (sc->intrs[i].tag != NULL)
876 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
877 						sc->intrs[i].tag);
878 		if (sc->intrs[i].tq != NULL)
879 			taskqueue_free(sc->intrs[i].tq);
880 
881 		if (sc->intrs[i].intr_res != NULL)
882 			bus_release_resource(sc->dev, SYS_RES_IRQ,
883 						sc->intrs[i].irq_rr,
884 						sc->intrs[i].intr_res);
885 		sc->intrs[i].tag = NULL;
886 		sc->intrs[i].intr_res = NULL;
887 	}
888 
889 	if (sc->flags & OCE_FLAGS_USING_MSIX)
890 		pci_release_msi(sc->dev);
891 
892 }
893 
894 /******************************************************************************
895 *			  Media callbacks functions 			      *
896 ******************************************************************************/
897 
898 static void
899 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
900 {
901 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
902 
903 	req->ifm_status = IFM_AVALID;
904 	req->ifm_active = IFM_ETHER;
905 
906 	if (sc->link_status == 1)
907 		req->ifm_status |= IFM_ACTIVE;
908 	else
909 		return;
910 
911 	switch (sc->link_speed) {
912 	case 1: /* 10 Mbps */
913 		req->ifm_active |= IFM_10_T | IFM_FDX;
914 		sc->speed = 10;
915 		break;
916 	case 2: /* 100 Mbps */
917 		req->ifm_active |= IFM_100_TX | IFM_FDX;
918 		sc->speed = 100;
919 		break;
920 	case 3: /* 1 Gbps */
921 		req->ifm_active |= IFM_1000_T | IFM_FDX;
922 		sc->speed = 1000;
923 		break;
924 	case 4: /* 10 Gbps */
925 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
926 		sc->speed = 10000;
927 		break;
928 	case 5: /* 20 Gbps */
929 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
930 		sc->speed = 20000;
931 		break;
932 	case 6: /* 25 Gbps */
933 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
934 		sc->speed = 25000;
935 		break;
936 	case 7: /* 40 Gbps */
937 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
938 		sc->speed = 40000;
939 		break;
940 	default:
941 		sc->speed = 0;
942 		break;
943 	}
944 
945 	return;
946 }
947 
/*
 * Media change callback: the hardware negotiates media on its own, so
 * any request is accepted as a no-op.  Declared static to match the
 * file-scope prototype above.
 */
static int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}
953 
/*
 * Decide whether a copy of an outgoing multicast/broadcast packet must
 * also be delivered to the BMC (OS-to-BMC pass-through).  On a positive
 * decision *os2bmc is set TRUE and *m_new receives a duplicate mbuf
 * (with the VLAN tag re-inserted) for the management path.
 *
 * NOTE(review): the IP/UDP checks below read protocol fields via mtod()
 * on the frame start, i.e. they assume a plain Ethernet + IPv4-shaped
 * header layout at fixed offsets -- verify against the BMC filter spec.
 */
static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
				struct mbuf *m, boolean_t *os2bmc,
				struct mbuf **m_new)
{
	struct ether_header *eh = NULL;

	eh = mtod(m, struct ether_header *);

	/* Feature disabled, or packet already marked for the BMC. */
	if (!is_os2bmc_enabled(sc) || *os2bmc) {
		*os2bmc = FALSE;
		goto done;
	}
	/* Only multicast/broadcast frames are BMC candidates. */
	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
		goto done;

	/* Blanket multicast/broadcast/ARP pass-through filters. */
	if (is_mc_allowed_on_bmc(sc, eh) ||
	    is_bc_allowed_on_bmc(sc, eh) ||
	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
		*os2bmc = TRUE;
		goto done;
	}

	/* ICMPv6 router/neighbor advertisements, per the RA/NA filters. */
	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
		uint8_t nexthdr = ip6->ip6_nxt;
		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
			switch (icmp6->icmp6_type) {
			case ND_ROUTER_ADVERT:
				*os2bmc = is_ipv6_ra_filt_enabled(sc);
				goto done;
			case ND_NEIGHBOR_ADVERT:
				*os2bmc = is_ipv6_na_filt_enabled(sc);
				goto done;
			default:
				break;
			}
		}
	}

	/* UDP services of interest to the BMC: DHCP, NetBIOS, DHCPv6 RAS. */
	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
		struct ip *ip = mtod(m, struct ip *);
		int iphlen = ip->ip_hl << 2;
		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
		switch (uh->uh_dport) {
		case DHCP_CLIENT_PORT:
			*os2bmc = is_dhcp_client_filt_enabled(sc);
			goto done;
		case DHCP_SERVER_PORT:
			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			*os2bmc = is_nbios_filt_enabled(sc);
			goto done;
		case DHCPV6_RAS_PORT:
			*os2bmc = is_ipv6_ras_filt_enabled(sc);
			goto done;
		default:
			break;
		}
	}
done:
	/* Duplicate the packet for the BMC; on allocation failure the
	 * pass-through is silently skipped for this packet. */
	if (*os2bmc) {
		*m_new = m_dup(m, M_NOWAIT);
		if (!*m_new) {
			*os2bmc = FALSE;
			return;
		}
		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
	}
}
1026 
1027 /*****************************************************************************
1028  *			  Transmit routines functions			     *
1029  *****************************************************************************/
1030 
1031 static int
1032 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1033 {
1034 	int rc = 0, i, retry_cnt = 0;
1035 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1036 	struct mbuf *m, *m_temp, *m_new = NULL;
1037 	struct oce_wq *wq = sc->wq[wq_index];
1038 	struct oce_packet_desc *pd;
1039 	struct oce_nic_hdr_wqe *nichdr;
1040 	struct oce_nic_frag_wqe *nicfrag;
1041 	struct ether_header *eh = NULL;
1042 	int num_wqes;
1043 	uint32_t reg_value;
1044 	boolean_t complete = TRUE;
1045 	boolean_t os2bmc = FALSE;
1046 
1047 	m = *mpp;
1048 	if (!m)
1049 		return EINVAL;
1050 
1051 	if (!(m->m_flags & M_PKTHDR)) {
1052 		rc = ENXIO;
1053 		goto free_ret;
1054 	}
1055 
1056 	/* Don't allow non-TSO packets longer than MTU */
1057 	if (!is_tso_pkt(m)) {
1058 		eh = mtod(m, struct ether_header *);
1059 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1060 			 goto free_ret;
1061 	}
1062 
1063 	if(oce_tx_asic_stall_verify(sc, m)) {
1064 		m = oce_insert_vlan_tag(sc, m, &complete);
1065 		if(!m) {
1066 			device_printf(sc->dev, "Insertion unsuccessful\n");
1067 			return 0;
1068 		}
1069 	}
1070 
1071 	/* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less
1072 	 * may cause a transmit stall on that port. So the work-around is to
1073 	 * pad short packets (<= 32 bytes) to a 36-byte length.
1074 	*/
1075 	if(IS_SH(sc) || IS_XE201(sc) ) {
1076 		if(m->m_pkthdr.len <= 32) {
1077 			char buf[36];
1078 			bzero((void *)buf, 36);
1079 			m_append(m, (36 - m->m_pkthdr.len), buf);
1080 		}
1081 	}
1082 
1083 tx_start:
1084 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1085 		/* consolidate packet buffers for TSO/LSO segment offload */
1086 #if defined(INET6) || defined(INET)
1087 		m = oce_tso_setup(sc, mpp);
1088 #else
1089 		m = NULL;
1090 #endif
1091 		if (m == NULL) {
1092 			rc = ENXIO;
1093 			goto free_ret;
1094 		}
1095 	}
1096 
1097 	pd = &wq->pckts[wq->pkt_desc_head];
1098 
1099 retry:
1100 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1101 				     pd->map,
1102 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1103 	if (rc == 0) {
1104 		num_wqes = pd->nsegs + 1;
1105 		if (IS_BE(sc) || IS_SH(sc)) {
1106 			/*Dummy required only for BE3.*/
1107 			if (num_wqes & 1)
1108 				num_wqes++;
1109 		}
1110 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1111 			bus_dmamap_unload(wq->tag, pd->map);
1112 			return EBUSY;
1113 		}
1114 		atomic_store_rel_int(&wq->pkt_desc_head,
1115 				     (wq->pkt_desc_head + 1) % \
1116 				      OCE_WQ_PACKET_ARRAY_SIZE);
1117 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1118 		pd->mbuf = m;
1119 
1120 		nichdr =
1121 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1122 		nichdr->u0.dw[0] = 0;
1123 		nichdr->u0.dw[1] = 0;
1124 		nichdr->u0.dw[2] = 0;
1125 		nichdr->u0.dw[3] = 0;
1126 
1127 		nichdr->u0.s.complete = complete;
1128 		nichdr->u0.s.mgmt = os2bmc;
1129 		nichdr->u0.s.event = 1;
1130 		nichdr->u0.s.crc = 1;
1131 		nichdr->u0.s.forward = 0;
1132 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1133 		nichdr->u0.s.udpcs =
1134 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1135 		nichdr->u0.s.tcpcs =
1136 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1137 		nichdr->u0.s.num_wqe = num_wqes;
1138 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1139 
1140 		if (m->m_flags & M_VLANTAG) {
1141 			nichdr->u0.s.vlan = 1; /*Vlan present*/
1142 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1143 		}
1144 
1145 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1146 			if (m->m_pkthdr.tso_segsz) {
1147 				nichdr->u0.s.lso = 1;
1148 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1149 			}
1150 			if (!IS_BE(sc) || !IS_SH(sc))
1151 				nichdr->u0.s.ipcs = 1;
1152 		}
1153 
1154 		RING_PUT(wq->ring, 1);
1155 		atomic_add_int(&wq->ring->num_used, 1);
1156 
1157 		for (i = 0; i < pd->nsegs; i++) {
1158 			nicfrag =
1159 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1160 						      struct oce_nic_frag_wqe);
1161 			nicfrag->u0.s.rsvd0 = 0;
1162 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1163 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1164 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1165 			pd->wqe_idx = wq->ring->pidx;
1166 			RING_PUT(wq->ring, 1);
1167 			atomic_add_int(&wq->ring->num_used, 1);
1168 		}
1169 		if (num_wqes > (pd->nsegs + 1)) {
1170 			nicfrag =
1171 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1172 						      struct oce_nic_frag_wqe);
1173 			nicfrag->u0.dw[0] = 0;
1174 			nicfrag->u0.dw[1] = 0;
1175 			nicfrag->u0.dw[2] = 0;
1176 			nicfrag->u0.dw[3] = 0;
1177 			pd->wqe_idx = wq->ring->pidx;
1178 			RING_PUT(wq->ring, 1);
1179 			atomic_add_int(&wq->ring->num_used, 1);
1180 			pd->nsegs++;
1181 		}
1182 
1183 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1184 		wq->tx_stats.tx_reqs++;
1185 		wq->tx_stats.tx_wrbs += num_wqes;
1186 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1187 		wq->tx_stats.tx_pkts++;
1188 
1189 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1190 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1191 		reg_value = (num_wqes << 16) | wq->wq_id;
1192 
1193 		/* if os2bmc is not enabled or if the pkt is already tagged as
1194 		   bmc, do nothing
1195 		 */
1196 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1197 
1198 		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1199 		if (m->m_flags & M_MCAST)
1200 			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
1201 		ETHER_BPF_MTAP(sc->ifp, m);
1202 
1203 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1204 
1205 	} else if (rc == EFBIG)	{
1206 		if (retry_cnt == 0) {
1207 			m_temp = m_defrag(m, M_NOWAIT);
1208 			if (m_temp == NULL)
1209 				goto free_ret;
1210 			m = m_temp;
1211 			*mpp = m_temp;
1212 			retry_cnt = retry_cnt + 1;
1213 			goto retry;
1214 		} else
1215 			goto free_ret;
1216 	} else if (rc == ENOMEM)
1217 		return rc;
1218 	else
1219 		goto free_ret;
1220 
1221 	if (os2bmc) {
1222 		m = m_new;
1223 		goto tx_start;
1224 	}
1225 
1226 	return 0;
1227 
1228 free_ret:
1229 	m_freem(*mpp);
1230 	*mpp = NULL;
1231 	return rc;
1232 }
1233 
/*
 * Reclaim resources for one completed transmit request on `wq': release the
 * ring slots it consumed, unload its DMA map, free the mbuf chain, and lift
 * the OACTIVE stall once the ring has drained to half full.
 */
static void
oce_process_tx_completion(struct oce_wq *wq)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	/* Oldest outstanding packet descriptor is at the tail. */
	pd = &wq->pckts[wq->pkt_desc_tail];
	/* Advance the tail with release semantics so the transmit side
	 * observes the descriptor as free only after we are done with it. */
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	/* +1 accounts for the NIC header WQE posted alongside the frags
	 * (see num_wqes = pd->nsegs + 1 in the transmit path). */
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	/* Hardware no longer references the mbuf chain; free it. */
	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	/* If transmit was stalled, restart it once at least half the ring
	 * slots are free again. */
	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}
1259 
1260 static void
1261 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1262 {
1263 
1264 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1265 		return;
1266 
1267 	if (!drbr_empty(sc->ifp, wq->br))
1268 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1269 
1270 }
1271 
#if defined(INET6) || defined(INET)
/*
 * Prepare an mbuf chain for TSO: identify the Ethernet/IP/TCP headers and
 * pull them into contiguous storage in the first mbuf.  Returns the
 * (possibly replaced) mbuf on success, or NULL when the frame is not TCP
 * over IPv4/IPv6 or an allocation fails; *mpp is updated on success.
 */
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	/* m_pullup() below may modify the chain; work on a writable copy. */
	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	/* Account for an 802.1Q tag when sizing the Ethernet header. */
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		/* Ethernet + IP (options included) + TCP (options included) */
		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		/* NOTE(review): assumes no IPv6 extension headers sit between
		 * the fixed header and TCP -- confirm the stack guarantees
		 * this for TSO-marked frames. */
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	/* Make the whole header block contiguous; m_pullup() frees the
	 * chain and returns NULL on failure. */
	m = m_pullup(m, total_len);
	*mpp = m;
	return m;
}
#endif /* INET6 || INET */
1337 
1338 void
1339 oce_tx_task(void *arg, int npending)
1340 {
1341 	struct oce_wq *wq = arg;
1342 	POCE_SOFTC sc = wq->parent;
1343 	struct ifnet *ifp = sc->ifp;
1344 	int rc = 0;
1345 
1346 	LOCK(&wq->tx_lock);
1347 	rc = oce_multiq_transmit(ifp, NULL, wq);
1348 	if (rc) {
1349 		device_printf(sc->dev,
1350 				"TX[%d] restart failed\n", wq->queue_index);
1351 	}
1352 	UNLOCK(&wq->tx_lock);
1353 }
1354 
/*
 * Legacy if_start handler: drain the interface send queue through TX
 * queue 0.  On a transmit failure the mbuf is prepended back onto the
 * send queue and the interface is marked OACTIVE until completions free
 * up ring space.
 */
void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default tx queue is 0 */

	/* Bail unless the interface is RUNNING and not OACTIVE. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	while (true) {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			/* oce_tx() leaves the mbuf intact on EBUSY/ENOMEM;
			 * requeue it and stall until completions arrive. */
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
	}
}
1389 
/*
 * Handle the Completion Queue for transmit: walk valid CQEs, reclaim the
 * corresponding packet descriptors, then re-arm the CQ.  Returns the
 * number of CQEs consumed.
 */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	LOCK(&wq->tx_compl_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	/* dw[3] is non-zero only for a valid, unprocessed completion. */
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		/* Resync the WQ consumer index to the WQE index reported
		 * by hardware, wrapping at the ring size. */
		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_process_tx_completion(wq);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;	/* mark the CQE as consumed */
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	/* Tell hardware how many entries we consumed. */
	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	UNLOCK(&wq->tx_compl_lock);
	return num_cqes;
}
1428 
/*
 * Enqueue `m' (may be NULL, meaning "just drain") on the work queue's
 * buf_ring and transmit as many queued packets as the hardware accepts.
 * Called with wq->tx_lock held (see oce_tx_task).  Returns 0 on success
 * or a drbr_enqueue() error.
 */
static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	/* Interface down or stalled: just queue the packet for later. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			/* oce_tx() frees the mbuf (and NULLs `next') on
			 * fatal errors, but leaves it intact on transient
			 * EBUSY/ENOMEM so it can be put back. */
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			}
			break;
		}
		drbr_advance(ifp, br);
	}

	return 0;
}
1467 
1468 /*****************************************************************************
1469  *			    Receive  routines functions 		     *
1470  *****************************************************************************/
1471 
1472 static void
1473 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1474 {
1475 	uint32_t *p;
1476         struct ether_header *eh = NULL;
1477         struct tcphdr *tcp_hdr = NULL;
1478         struct ip *ip4_hdr = NULL;
1479         struct ip6_hdr *ip6 = NULL;
1480         uint32_t payload_len = 0;
1481 
1482         eh = mtod(m, struct ether_header *);
1483         /* correct IP header */
1484         if(!cqe2->ipv6_frame) {
1485 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1486                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1487                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1488                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1489         }else {
1490         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1491                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1492                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1493                                                 - sizeof(struct ip6_hdr);
1494                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1495                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1496         }
1497 
1498         /* correct tcp header */
1499         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1500         if(cqe2->push) {
1501         	tcp_hdr->th_flags |= TH_PUSH;
1502         }
1503         tcp_hdr->th_win = htons(cqe2->tcp_window);
1504         tcp_hdr->th_sum = 0xffff;
1505         if(cqe2->ts_opt) {
1506                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1507                 *p = cqe1->tcp_timestamp_val;
1508                 *(p+1) = cqe1->tcp_timestamp_ecr;
1509         }
1510 
1511 	return;
1512 }
1513 
/*
 * Assemble the received frame described by `cqe_info' into an mbuf chain,
 * consuming cqe_info->num_frags posted RX buffers from the ring.  The head
 * of the chain (with packet header, checksum flags, total length) is
 * returned through *m; *m is left untouched if the ring runs empty early.
 */
static void
oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        uint32_t i = 0, frag_len = 0;
	uint32_t len = cqe_info->pkt_size;
        struct oce_packet_desc *pd;
        struct mbuf *tail = NULL;

        for (i = 0; i < cqe_info->num_frags; i++) {
                if (rq->ring->cidx == rq->ring->pidx) {
                        device_printf(sc->dev,
                                  "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
                        return;
                }
                pd = &rq->pckts[rq->ring->cidx];

                /* Hardware is done with this buffer; hand it to the CPU. */
                bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(rq->tag, pd->map);
		RING_GET(rq->ring, 1);
                rq->pending--;

                /* All fragments are frag_size except possibly the last. */
                frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
                pd->mbuf->m_len = frag_len;

                if (tail != NULL) {
                        /* additional fragments */
                        pd->mbuf->m_flags &= ~M_PKTHDR;
                        tail->m_next = pd->mbuf;
			if(rq->islro)
                        	tail->m_nextpkt = NULL;
                        tail = pd->mbuf;
                } else {
                        /* first fragment, fill out much of the packet header */
                        pd->mbuf->m_pkthdr.len = len;
			if(rq->islro)
                        	pd->mbuf->m_nextpkt = NULL;
                        pd->mbuf->m_pkthdr.csum_flags = 0;
                        if (IF_CSUM_ENABLED(sc)) {
                                if (cqe_info->l4_cksum_pass) {
                                        if(!cqe_info->ipv6_frame) { /* IPV4 */
                                                pd->mbuf->m_pkthdr.csum_flags |=
                                                        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                                        }else { /* IPV6 frame */
						if(rq->islro) {
                                                	pd->mbuf->m_pkthdr.csum_flags |=
                                                        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
						}
                                        }
                                        pd->mbuf->m_pkthdr.csum_data = 0xffff;
                                }
                                if (cqe_info->ip_cksum_pass) {
                                        pd->mbuf->m_pkthdr.csum_flags |=
                                               (CSUM_IP_CHECKED|CSUM_IP_VALID);
                                }
                        }
                        *m = tail = pd->mbuf;
               }
                /* Ownership moved into the chain; slot is free for refill. */
                pd->mbuf = NULL;
                len -= frag_len;
        }

        return;
}
1578 
/*
 * Deliver one hardware-LRO receive completion to the stack.  `cqe2' is
 * NULL for a singleton completion; otherwise `cqe' is really the part-1
 * CQE and `cqe2' supplies the coalesced-frame fields, in which case the
 * IP/TCP headers are corrected before input.
 */
static void
oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
{
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        struct nic_hwlro_cqe_part1 *cqe1 = NULL;
        struct mbuf *m = NULL;
	struct oce_common_cqe_info cq_info;

	/* parse cqe */
        if(cqe2 == NULL) {
                cq_info.pkt_size =  cqe->pkt_size;
                cq_info.vtag = cqe->vlan_tag;
                cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
                cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
                cq_info.ipv6_frame = cqe->ipv6_frame;
                cq_info.vtp = cqe->vtp;
                cq_info.qnq = cqe->qnq;
        }else {
                cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
                cq_info.pkt_size =  cqe2->coalesced_size;
                cq_info.vtag = cqe2->vlan_tag;
                cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
                cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
                cq_info.ipv6_frame = cqe2->ipv6_frame;
                cq_info.vtp = cqe2->vtp;
                cq_info.qnq = cqe1->qnq;
        }

	cq_info.vtag = BSWAP_16(cq_info.vtag);

        /* Number of posted RX buffers this frame spans (round up). */
        cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
        if(cq_info.pkt_size % rq->cfg.frag_size)
                cq_info.num_frags++;

	oce_rx_mbuf_chain(rq, &cq_info, &m);

	if (m) {
		if(cqe2) {
			//assert(cqe2->valid != 0);

			//assert(cqe2->cqe_type != 2);
			oce_correct_header(m, cqe1, cqe2);
		}

		m->m_pkthdr.rcvif = sc->ifp;
		/* queue_index 0 is the default queue; RSS queues start at 1. */
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

		/* This determines if the vlan tag is valid */
		if (cq_info.vtp) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cq_info.qnq) {
					m->m_pkthdr.ether_vtag = cq_info.vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
				/* In UMC mode generally pvid will be striped by
				   hw. But in some cases we have seen it comes
				   with pvid. So if pvid == vlan, neglect vlan.
				 */
				m->m_pkthdr.ether_vtag = cq_info.vtag;
				m->m_flags |= M_VLANTAG;
			}
		}
		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);

		(*sc->ifp->if_input) (sc->ifp, m);

		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cq_info.pkt_size;
		rq->rx_stats.rx_frags += cq_info.num_frags;
		rq->rx_stats.rx_ucast_pkts++;
	}
        return;
}
1659 
/*
 * Deliver one normal (non-hardware-LRO) receive completion: build the
 * mbuf chain, set flowid/VLAN/checksum metadata, try software LRO, and
 * hand the packet to the stack.  Invalid or zero-length completions are
 * discarded.
 */
static void
oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int len;
	struct mbuf *m = NULL;
	struct oce_common_cqe_info cq_info;
	uint16_t vtag = 0;

	/* Is it a flush compl that has no data */
	if(!cqe->u0.s.num_fragments)
		goto exit;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/*partial DMA workaround for Lancer*/
		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
		goto exit;
	}

	/* Completion for another port: drop the buffers it consumed. */
	if (!oce_cqe_portid_valid(sc, cqe)) {
		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
		goto exit;
	}

	 /* Get vlan_tag value */
	if(IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
	cq_info.num_frags = cqe->u0.s.num_fragments;
	cq_info.pkt_size = cqe->u0.s.pkt_size;

	oce_rx_mbuf_chain(rq, &cq_info, &m);

	if (m) {
		m->m_pkthdr.rcvif = sc->ifp;
		/* queue_index 0 is the default queue; RSS queues start at 1. */
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

		/* This determines if the vlan tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode generally pvid will be striped by
				   hw. But in some cases we have seen it comes
				   with pvid. So if pvid == vlan, neglect vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {
			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued ++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}
1757 
1758 void
1759 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1760 {
1761 	uint32_t i = 0;
1762 	struct oce_packet_desc *pd;
1763 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1764 
1765 	for (i = 0; i < num_frags; i++) {
1766                 if (rq->ring->cidx == rq->ring->pidx) {
1767                         device_printf(sc->dev,
1768                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1769                         return;
1770                 }
1771                 pd = &rq->pckts[rq->ring->cidx];
1772                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1773                 bus_dmamap_unload(rq->tag, pd->map);
1774                 if (pd->mbuf != NULL) {
1775                         m_freem(pd->mbuf);
1776                         pd->mbuf = NULL;
1777                 }
1778 
1779 		RING_GET(rq->ring, 1);
1780                 rq->pending--;
1781 	}
1782 }
1783 
1784 static int
1785 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1786 {
1787 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1788 	int vtp = 0;
1789 
1790 	if (sc->be3_native) {
1791 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1792 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1793 	} else
1794 		vtp = cqe->u0.s.vlan_tag_present;
1795 
1796 	return vtp;
1797 
1798 }
1799 
1800 static int
1801 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1802 {
1803 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1804 	int port_id = 0;
1805 
1806 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1807 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1808 		port_id =  cqe_v1->u0.s.port;
1809 		if (sc->port_id != port_id)
1810 			return 0;
1811 	} else
1812 		;/* For BE3 legacy and Lancer this is dummy */
1813 
1814 	return 1;
1815 
1816 }
1817 
1818 #if defined(INET6) || defined(INET)
1819 void
1820 oce_rx_flush_lro(struct oce_rq *rq)
1821 {
1822 	struct lro_ctrl	*lro = &rq->lro;
1823 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1824 
1825 	if (!IF_LRO_ENABLED(sc))
1826 		return;
1827 
1828 	tcp_lro_flush_all(lro);
1829 	rq->lro_pkts_queued = 0;
1830 
1831 	return;
1832 }
1833 
1834 static int
1835 oce_init_lro(POCE_SOFTC sc)
1836 {
1837 	struct lro_ctrl *lro = NULL;
1838 	int i = 0, rc = 0;
1839 
1840 	for (i = 0; i < sc->nrqs; i++) {
1841 		lro = &sc->rq[i]->lro;
1842 		rc = tcp_lro_init(lro);
1843 		if (rc != 0) {
1844 			device_printf(sc->dev, "LRO init failed\n");
1845 			return rc;
1846 		}
1847 		lro->ifp = sc->ifp;
1848 	}
1849 
1850 	return rc;
1851 }
1852 
1853 void
1854 oce_free_lro(POCE_SOFTC sc)
1855 {
1856 	struct lro_ctrl *lro = NULL;
1857 	int i = 0;
1858 
1859 	for (i = 0; i < sc->nrqs; i++) {
1860 		lro = &sc->rq[i]->lro;
1861 		if (lro)
1862 			tcp_lro_free(lro);
1863 	}
1864 }
1865 #endif
1866 
/*
 * Post up to `count' fresh receive buffers on `rq': allocate a jumbo
 * cluster mbuf per slot, DMA-map it, write its address into an RQE, and
 * finally notify hardware via the doorbell in batches no larger than the
 * chip's maximum posting size.  Always returns 0; allocation or mapping
 * failures simply stop the refill early.
 */
int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;
	uint32_t val = 0;
	uint32_t oce_max_rq_posts = 64;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		/* NOTE(review): `in' is computed but never read in this
		 * function -- looks like a leftover ring-full check. */
		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;

		pd = &rq->pckts[rq->ring->pidx];
		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
		if (pd->mbuf == NULL) {
			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
			break;
		}
		pd->mbuf->m_nextpkt = NULL;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;

		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
			break;
		}

		/* Hardware expects exactly one segment per RQE.
		 * NOTE(review): this retry path appears to leak pd->mbuf
		 * and leave pd->map loaded before overwriting them on the
		 * next iteration -- confirm against the mainline driver. */
		if (nsegs != 1) {
			i--;
			continue;
		}

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
	if (added != 0) {
		/* Ring the doorbell in full-sized batches first... */
		for (i = added / oce_max_rq_posts; i > 0; i--) {
			rxdb_reg.bits.num_posted = oce_max_rq_posts;
			rxdb_reg.bits.qid = rq->rq_id;
			if(rq->islro) {
                                val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
                                val |= oce_max_rq_posts << 16;
                                OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			}else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
			added -= oce_max_rq_posts;
		}
		/* ...then post whatever remains in one final write. */
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			if(rq->islro) {
                                val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
                                val |= added << 16;
                                OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			}else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
		}
	}

	return 0;
}
1948 
1949 static void
1950 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1951 {
1952         if (num_cqes) {
1953                 oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1954 		if(!sc->enable_hwlro) {
1955 			if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1956 				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1957 		}else {
1958                 	if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
1959                         	oce_alloc_rx_bufs(rq, 64);
1960         	}
1961 	}
1962 
1963         return;
1964 }
1965 
/*
 * Completion handler for a hardware-LRO receive queue.  Three CQE types
 * are handled: type 0 (singleton, a complete frame in one entry), type 1
 * (first part of a coalesced frame, stashed in rq->cqe_firstpart), and
 * type 2 (second part, paired with the stashed part 1).  Any ordering
 * violation logs a diagnostic and aborts the loop.  Always returns 0.
 */
uint16_t
oce_rq_handler_lro(void *arg)
{
        struct oce_rq *rq = (struct oce_rq *)arg;
        struct oce_cq *cq = rq->cq;
        POCE_SOFTC sc = rq->parent;
        struct nic_hwlro_singleton_cqe *cqe;
        struct nic_hwlro_cqe_part2 *cqe2;
        int num_cqes = 0;

	LOCK(&rq->rx_lock);
        bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
        while (cqe->valid) {
                if(cqe->cqe_type == 0) { /* singleton cqe */
			/* we should not get singleton cqe after cqe1 on same rq */
			if(rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
				goto exit_rq_handler_lro;
			}
                        if(cqe->error != 0) {
                                rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
                        }
                        oce_rx_lro(rq, cqe, NULL);
                        rq->rx_stats.rx_compl++;
                        cqe->valid = 0;
                        RING_GET(cq->ring, 1);
                        num_cqes++;
                        /* Bound the work done per invocation. */
                        if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
                                break;
                }else if(cqe->cqe_type == 0x1) { /* first part */
			/* we should not get cqe1 after cqe1 on same rq */
			if(rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1 \n");
				goto exit_rq_handler_lro;
			}
			/* Stash part 1 until its matching part 2 arrives. */
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
                        RING_GET(cq->ring, 1);
                }else if(cqe->cqe_type == 0x2) { /* second part */
			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
                        if(cqe2->error != 0) {
                                rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
                        }
			/* We should not get cqe2 without cqe1 */
			if(rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1 \n");
				goto exit_rq_handler_lro;
			}
                        oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

                        rq->rx_stats.rx_compl++;
                        rq->cqe_firstpart->valid = 0;
                        cqe2->valid = 0;
			rq->cqe_firstpart = NULL;

                        RING_GET(cq->ring, 1);
                        num_cqes += 2;
                        if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
                                break;
		}

                bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
        }
	oce_check_rx_bufs(sc, num_cqes, rq);
exit_rq_handler_lro:
	UNLOCK(&rq->rx_lock);
	return 0;
}
2037 
/*
 * Handle the Completion Queue for receive.  Delegates to the hardware-LRO
 * handler when the RQ runs in LRO mode; otherwise walks valid CQEs,
 * delivers each frame via oce_rx(), periodically flushes software LRO,
 * then re-arms the CQ and refills buffers.  Always returns 0.
 */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0;

	if(rq->islro) {
		oce_rq_handler_lro(arg);
		return 0;
	}
	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dw[2] is non-zero only for a valid, unprocessed completion. */
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			/* Post L3/L4 errors to stack.*/
			oce_rx(rq, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;	/* mark the CQE as consumed */

#if defined(INET6) || defined(INET)
		/* Flush periodically so aggregated data is not held too long. */
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		/* Bound the work done per invocation. */
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
        if (IF_LRO_ENABLED(sc))
                oce_rx_flush_lro(rq);
#endif

	oce_check_rx_bufs(sc, num_cqes, rq);
	UNLOCK(&rq->rx_lock);
	return 0;

}
2096 
2097 /*****************************************************************************
2098  *		   Helper function prototypes in this file 		     *
2099  *****************************************************************************/
2100 
/*
 * Allocate and configure the network interface (ifnet) for this adapter:
 * media, handlers, queueing parameters, offload capabilities, and TSO
 * limits; finishes with ether_ifattach().  Returns 0 or ENOMEM.
 */
static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	/* Only autoselect media is exposed; speed is reported elsewhere. */
	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
	/* Multiqueue transmit entry points. */
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	/* Hardware checksum and TSO assistance. */
	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	/* Everything supported is enabled by default. */
	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	sc->ifp->if_baudrate = IF_Gbps(10);

	/* Leave room for an Ethernet + VLAN header within the 64K TSO frame. */
	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
	sc->ifp->if_hw_tsomaxsegsize = 4096;

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}
2154 
2155 static void
2156 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2157 {
2158 	POCE_SOFTC sc = ifp->if_softc;
2159 
2160 	if (ifp->if_softc !=  arg)
2161 		return;
2162 	if ((vtag == 0) || (vtag > 4095))
2163 		return;
2164 
2165 	sc->vlan_tag[vtag] = 1;
2166 	sc->vlans_added++;
2167 	if (sc->vlans_added <= (sc->max_vlans + 1))
2168 		oce_vid_config(sc);
2169 }
2170 
2171 static void
2172 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2173 {
2174 	POCE_SOFTC sc = ifp->if_softc;
2175 
2176 	if (ifp->if_softc !=  arg)
2177 		return;
2178 	if ((vtag == 0) || (vtag > 4095))
2179 		return;
2180 
2181 	sc->vlan_tag[vtag] = 0;
2182 	sc->vlans_added--;
2183 	oce_vid_config(sc);
2184 }
2185 
2186 /*
2187  * A max of 64 vlans can be configured in BE. If the user configures
2188  * more, place the card in vlan promiscuous mode.
2189  */
/*
 * Push the current VLAN filter state to the hardware.  While the number
 * of configured tags fits the filter table and HW filtering is enabled,
 * program the exact tag list; otherwise fall back to vlan-promiscuous
 * mode.  Returns the mailbox command status.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		/* Collect all currently configured tags into vtags[]. */
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		/* Too many tags or no HW filtering: vlan promiscuous. */
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
					 	NULL, 0, 1, 1);
	return status;
}
2213 
/*
 * Sync the hardware MAC filter with the interface's link-level address.
 * If the lladdr changed, add the new address (obtaining a new pmac id),
 * then remove the old pmac entry and cache the new address in softc.
 */
static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;

	/* Nothing to do if the address is unchanged. */
	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	/* Add the new address first so the port is never filterless. */
	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed update macaddress\n");

}
2236 
2237 static int
2238 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2239 {
2240 	POCE_SOFTC sc = ifp->if_softc;
2241 	struct ifreq *ifr = (struct ifreq *)data;
2242 	int rc = ENXIO;
2243 	char cookie[32] = {0};
2244 	void *priv_data = ifr_data_get_ptr(ifr);
2245 	void *ioctl_ptr;
2246 	uint32_t req_size;
2247 	struct mbx_hdr req;
2248 	OCE_DMA_MEM dma_mem;
2249 	struct mbx_common_get_cntl_attr *fw_cmd;
2250 
2251 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2252 		return EFAULT;
2253 
2254 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2255 		return EINVAL;
2256 
2257 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2258 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2259 		return EFAULT;
2260 
2261 	req_size = le32toh(req.u0.req.request_length);
2262 	if (req_size > 65536)
2263 		return EINVAL;
2264 
2265 	req_size += sizeof(struct mbx_hdr);
2266 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2267 	if (rc)
2268 		return ENOMEM;
2269 
2270 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2271 		rc = EFAULT;
2272 		goto dma_free;
2273 	}
2274 
2275 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2276 	if (rc) {
2277 		rc = EIO;
2278 		goto dma_free;
2279 	}
2280 
2281 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
2282 		rc =  EFAULT;
2283 
2284 	/*
2285 	   firmware is filling all the attributes for this ioctl except
2286 	   the driver version..so fill it
2287 	 */
2288 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2289 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2290 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2291 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2292 	}
2293 
2294 dma_free:
2295 	oce_dma_free(sc, &dma_mem);
2296 	return rc;
2297 
2298 }
2299 
2300 static void
2301 oce_eqd_set_periodic(POCE_SOFTC sc)
2302 {
2303 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2304 	struct oce_aic_obj *aic;
2305 	struct oce_eq *eqo;
2306 	uint64_t now = 0, delta;
2307 	int eqd, i, num = 0;
2308 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2309 	struct oce_wq *wq;
2310 	struct oce_rq *rq;
2311 
2312 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2313 
2314 	for (i = 0 ; i < sc->neqs; i++) {
2315 		eqo = sc->eq[i];
2316 		aic = &sc->aic_obj[i];
2317 		/* When setting the static eq delay from the user space */
2318 		if (!aic->enable) {
2319 			if (aic->ticks)
2320 				aic->ticks = 0;
2321 			eqd = aic->et_eqd;
2322 			goto modify_eqd;
2323 		}
2324 
2325 		if (i == 0) {
2326 			rq = sc->rq[0];
2327 			rxpkts = rq->rx_stats.rx_pkts;
2328 		} else
2329 			rxpkts = 0;
2330 		if (i + 1 < sc->nrqs) {
2331 			rq = sc->rq[i + 1];
2332 			rxpkts += rq->rx_stats.rx_pkts;
2333 		}
2334 		if (i < sc->nwqs) {
2335 			wq = sc->wq[i];
2336 			tx_reqs = wq->tx_stats.tx_reqs;
2337 		} else
2338 			tx_reqs = 0;
2339 		now = ticks;
2340 
2341 		if (!aic->ticks || now < aic->ticks ||
2342 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2343 			aic->prev_rxpkts = rxpkts;
2344 			aic->prev_txreqs = tx_reqs;
2345 			aic->ticks = now;
2346 			continue;
2347 		}
2348 
2349 		delta = ticks_to_msecs(now - aic->ticks);
2350 
2351 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2352 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2353 		eqd = (pps / 15000) << 2;
2354 		if (eqd < 8)
2355 			eqd = 0;
2356 
2357 		/* Make sure that the eq delay is in the known range */
2358 		eqd = min(eqd, aic->max_eqd);
2359 		eqd = max(eqd, aic->min_eqd);
2360 
2361 		aic->prev_rxpkts = rxpkts;
2362 		aic->prev_txreqs = tx_reqs;
2363 		aic->ticks = now;
2364 
2365 modify_eqd:
2366 		if (eqd != aic->cur_eqd) {
2367 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2368 			set_eqd[num].eq_id = eqo->eq_id;
2369 			aic->cur_eqd = eqd;
2370 			num++;
2371 		}
2372 	}
2373 
2374 	/* Is there atleast one eq that needs to be modified? */
2375         for(i = 0; i < num; i += 8) {
2376                 if((num - i) >=8 )
2377                         oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
2378                 else
2379                         oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2380         }
2381 
2382 }
2383 
2384 static void oce_detect_hw_error(POCE_SOFTC sc)
2385 {
2386 
2387 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2388 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2389 	uint32_t i;
2390 
2391 	if (sc->hw_error)
2392 		return;
2393 
2394 	if (IS_XE201(sc)) {
2395 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2396 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2397 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2398 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2399 		}
2400 	} else {
2401 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2402 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2403 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2404 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2405 
2406 		ue_low = (ue_low & ~ue_low_mask);
2407 		ue_high = (ue_high & ~ue_high_mask);
2408 	}
2409 
2410 	/* On certain platforms BE hardware can indicate spurious UEs.
2411 	 * Allow the h/w to stop working completely in case of a real UE.
2412 	 * Hence not setting the hw_error for UE detection.
2413 	 */
2414 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2415 		sc->hw_error = TRUE;
2416 		device_printf(sc->dev, "Error detected in the card\n");
2417 	}
2418 
2419 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2420 		device_printf(sc->dev,
2421 				"ERR: sliport status 0x%x\n", sliport_status);
2422 		device_printf(sc->dev,
2423 				"ERR: sliport error1 0x%x\n", sliport_err1);
2424 		device_printf(sc->dev,
2425 				"ERR: sliport error2 0x%x\n", sliport_err2);
2426 	}
2427 
2428 	if (ue_low) {
2429 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2430 			if (ue_low & 1)
2431 				device_printf(sc->dev, "UE: %s bit set\n",
2432 							ue_status_low_desc[i]);
2433 		}
2434 	}
2435 
2436 	if (ue_high) {
2437 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2438 			if (ue_high & 1)
2439 				device_printf(sc->dev, "UE: %s bit set\n",
2440 							ue_status_hi_desc[i]);
2441 		}
2442 	}
2443 
2444 }
2445 
/*
 * Once-per-second housekeeping callout: check for hardware errors,
 * refresh statistics, sync the MAC filter, restart any stalled TX
 * queues and retune interrupt coalescing, then reschedule itself.
 */
static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_detect_hw_error(sc);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX Watch Dog*/
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}
2467 
/*
 * Reap outstanding TX completions before tearing down the TX path.
 * Polls the WQs until every ring is empty, the hardware has been silent
 * for ~10ms, or a hardware error is latched; anything still marked in
 * use after that is drained by calling oce_process_tx_completion()
 * directly.
 */
static void
oce_tx_compl_clean(POCE_SOFTC sc)
{
	struct oce_wq *wq;
	int i = 0, timeo = 0, num_wqes = 0;
	int pending_txqs = sc->nwqs;

	/* Stop polling for compls when HW has been silent for 10ms or
	 * hw_error or no outstanding completions expected
	 */
	do {
		pending_txqs = sc->nwqs;

		for_all_wq_queues(sc, wq, i) {
			num_wqes = oce_wq_handler(wq);

			/* Progress was made; restart the silence timer. */
			if(num_wqes)
				timeo = 0;

			if(!wq->ring->num_used)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
			break;

		DELAY(1000);
	} while (TRUE);

	/* Force-complete whatever the hardware never acknowledged. */
	for_all_wq_queues(sc, wq, i) {
		while(wq->ring->num_used) {
			LOCK(&wq->tx_compl_lock);
			oce_process_tx_completion(wq);
			UNLOCK(&wq->tx_compl_lock);
		}
	}

}
2506 
2507 /* NOTE : This should only be called holding
2508  *        DEVICE_LOCK.
2509  */
/*
 * Quiesce the interface: stop TX/RX, drain interrupt taskqueues and
 * invalidate pending CQ/EQ entries, while leaving interrupts armed just
 * enough to keep receiving MCC async events.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Reap outstanding TX completions before stopping interrupts. */
	oce_tx_compl_clean(sc);

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes a Giant Lock, we should not hold
	   any other lock. So unlock the device lock and reacquire it
	   after completing taskqueue_drain.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries*/
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But still we need to get MCC async events.
	   So enable intrs and also arm first EQ
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}
2556 
/*
 * Bring the interface up: restart the RX and TX queues, rearm all event
 * queues and re-enable interrupts.  Counterpart of oce_if_deactivate().
 */
static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}

	/* Rearm every EQ so interrupts flow again. */
	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}
2589 
2590 static void
2591 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2592 {
2593 	/* Update Link status */
2594 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2595 	     ASYNC_EVENT_LINK_UP) {
2596 		sc->link_status = ASYNC_EVENT_LINK_UP;
2597 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2598 	} else {
2599 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2600 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2601 	}
2602 }
2603 
2604 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2605 					 struct oce_async_evt_grp5_os2bmc *evt)
2606 {
2607 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2608 	if (evt->u.s.mgmt_enable)
2609 		sc->flags |= OCE_FLAGS_OS2BMC;
2610 	else
2611 		return;
2612 
2613 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2614 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2615 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2616 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2617 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2618 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2619 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2620 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2621 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2622 }
2623 
2624 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2625 {
2626 	struct oce_async_event_grp5_pvid_state *gcqe;
2627 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2628 
2629 	switch (cqe->u0.s.async_type) {
2630 	case ASYNC_EVENT_PVID_STATE:
2631 		/* GRP5 PVID */
2632 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2633 		if (gcqe->enabled)
2634 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2635 		else
2636 			sc->pvid = 0;
2637 		break;
2638 	case ASYNC_EVENT_OS2BMC:
2639 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2640 		oce_async_grp5_osbmc_process(sc, bmccqe);
2641 		break;
2642 	default:
2643 		break;
2644 	}
2645 }
2646 
2647 /* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_qnq *dbgcqe;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	/* dw[3] carries the valid bit; it is cleared below once consumed. */
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if (evt_type == ASYNC_EVENT_GRP5) {
				oce_process_grp5_events(sc, cqe);
			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
					optype == ASYNC_EVENT_DEBUG_QNQ) {
				/* QnQ debug event carrying the outer vlan id */
				dbgcqe =  (struct oce_async_event_qnq *)cqe;
				if(dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	/* Tell the hardware how many entries were consumed. */
	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}
2695 
2696 static void
2697 setup_max_queues_want(POCE_SOFTC sc)
2698 {
2699 	/* Check if it is FLEX machine. Is so dont use RSS */
2700 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2701 	    (sc->function_mode & FNM_UMC_MODE)    ||
2702 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2703 	    (!is_rss_enabled(sc))		  ||
2704 	    IS_BE2(sc)) {
2705 		sc->nrqs = 1;
2706 		sc->nwqs = 1;
2707 	} else {
2708 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2709 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2710 	}
2711 
2712 	if (IS_BE2(sc) && is_rss_enabled(sc))
2713 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2714 }
2715 
2716 static void
2717 update_queues_got(POCE_SOFTC sc)
2718 {
2719 	if (is_rss_enabled(sc)) {
2720 		sc->nrqs = sc->intr_count + 1;
2721 		sc->nwqs = sc->intr_count;
2722 	} else {
2723 		sc->nrqs = 1;
2724 		sc->nwqs = 1;
2725 	}
2726 
2727 	if (IS_BE2(sc))
2728 		sc->nwqs = 1;
2729 }
2730 
/*
 * Return TRUE if 'm' is an IPv6 packet whose next header is neither TCP
 * nor UDP and whose first extension header has ip6e_len == 0xff — the
 * pattern checked for by the BE3 A1 TX stall workaround (see
 * oce_tx_asic_stall_verify()).  m->m_data is restored before returning.
 * NOTE(review): assumes the ethernet, IPv6 and first extension headers
 * are contiguous in the leading mbuf — confirm callers guarantee this.
 */
static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if((ip6->ip6_nxt != IPPROTO_TCP) && \
				(ip6->ip6_nxt != IPPROTO_UDP)){
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if(ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}
2756 
2757 static int
2758 is_be3_a1(POCE_SOFTC sc)
2759 {
2760 	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2761 		return TRUE;
2762 	}
2763 	return FALSE;
2764 }
2765 
/*
 * Embed the VLAN tag (and, when configured, the pvid or outer QnQ tag)
 * into the packet data instead of relying on hardware tag insertion.
 * When a pvid/qnqid is in force, *complete is cleared — presumably so
 * the caller downgrades the checksum-offload request; confirm at the
 * call site.  Returns the (possibly re-allocated) mbuf chain, or NULL
 * if the chain is not writable.
 */
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if(!M_WRITABLE(m))
		return NULL;

	/* Embed vlan tag in the packet if it is not part of it */
	if(m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if(sc->pvid) {
		if(!vlan_tag)
			vlan_tag = sc->pvid;
		if (complete)
			*complete = FALSE;
	}

	if(vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if(sc->qnqid) {
		/* Outer tag for Q-in-Q encapsulation. */
		m = ether_vlanencap(m, sc->qnqid);

		if (complete)
			*complete = FALSE;
	}
	return m;
}
2800 
2801 static int
2802 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2803 {
2804 	if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2805 			oce_check_ipv6_ext_hdr(m)) {
2806 		return TRUE;
2807 	}
2808 	return FALSE;
2809 }
2810 
/*
 * Determine queue and RSS resource limits for this adapter.  Non-BE
 * parts query the firmware profile (falling back to static defaults on
 * failure); BE3 uses fixed driver-side values.
 */
static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_profile_config(sc, max_rss);
		if (rc) {
			/* Profile query failed: use static defaults. */
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else { /* For BE3 don't rely on fw for determining the resources */
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		sc->nwqs = OCE_MAX_WQ;
		sc->max_vlans = MAX_VLANFILTER_SIZE;
	}
}
2837 
2838 static void
2839 oce_rdma_close(void)
2840 {
2841   if (oce_rdma_if != NULL) {
2842     oce_rdma_if = NULL;
2843   }
2844 }
2845 
/*
 * RoCE helper: copy this adapter's 6-byte MAC address into 'macaddr'.
 * The destination buffer must have room for at least 6 bytes.
 */
static void
oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
{
  memcpy(macaddr, sc->macaddr.mac_addr, 6);
}
2851 
2852 int
2853 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2854 {
2855   POCE_SOFTC sc;
2856   struct oce_dev_info di;
2857   int i;
2858 
2859   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2860     return -EINVAL;
2861   }
2862 
2863   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2864       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2865     return -ENXIO;
2866   }
2867 
2868   rdma_info->close = oce_rdma_close;
2869   rdma_info->mbox_post = oce_mbox_post;
2870   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2871   rdma_info->get_mac_addr = oce_get_mac_addr;
2872 
2873   oce_rdma_if = rdma_if;
2874 
2875   sc = softc_head;
2876   while (sc != NULL) {
2877     if (oce_rdma_if->announce != NULL) {
2878       memset(&di, 0, sizeof(di));
2879       di.dev = sc->dev;
2880       di.softc = sc;
2881       di.ifp = sc->ifp;
2882       di.db_bhandle = sc->db_bhandle;
2883       di.db_btag = sc->db_btag;
2884       di.db_page_size = 4096;
2885       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2886         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2887       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2888         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2889       } else {
2890         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2891       }
2892       di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
2893       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2894         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2895         di.msix.start_vector = sc->intr_count;
2896         for (i=0; i<di.msix.num_vectors; i++) {
2897           di.msix.vector_list[i] = sc->intrs[i].vector;
2898         }
2899       } else {
2900       }
2901       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2902       di.vendor_id = pci_get_vendor(sc->dev);
2903       di.dev_id = pci_get_device(sc->dev);
2904 
2905       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2906           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2907       }
2908 
2909       rdma_if->announce(&di);
2910       sc = sc->next;
2911     }
2912   }
2913 
2914   return 0;
2915 }
2916 
/*
 * Read tunables controlling hardware LRO and the RQ mbuf buffer size.
 *
 * NOTE(review): the getenv() calls below are commented out, so 'value'
 * is always NULL and both features keep their defaults (hwlro disabled,
 * 2K buffers).  The dead branches are retained to document the intended
 * tunables — confirm whether they should be re-enabled (e.g. via
 * kern_getenv()/TUNABLE) or removed.
 */
static void
oce_read_env_variables( POCE_SOFTC sc )
{
	char *value = NULL;
	int rc = 0;

        /* read if user wants to enable hwlro or swlro */
        //value = getenv("oce_enable_hwlro");
        if(value && IS_SH(sc)) {
                sc->enable_hwlro = strtol(value, NULL, 10);
                if(sc->enable_hwlro) {
                        rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
                        if(rc) {
                                device_printf(sc->dev, "no hardware lro support\n");
                		device_printf(sc->dev, "software lro enabled\n");
                                sc->enable_hwlro = 0;
                        }else {
                                device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
                        }
                }else {
                        device_printf(sc->dev, "software lro enabled\n");
                }
        }else {
                sc->enable_hwlro = 0;
        }

        /* read mbuf size */
        //value = getenv("oce_rq_buf_size");
        if(value && IS_SH(sc)) {
                oce_rq_buf_size = strtol(value, NULL, 10);
                /* only power-of-two/jumbo sizes the RQ supports */
                switch(oce_rq_buf_size) {
                case 2048:
                case 4096:
                case 9216:
                case 16384:
                        break;

                default:
                        device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n");
                        oce_rq_buf_size = 2048;
                }
        }

	return;
}
2963