xref: /freebsd/sys/dev/oce/oce_if.c (revision 8b012980b8cc5f8619cd552ee6df7b98b97c5d62)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 
42 #include "opt_inet6.h"
43 #include "opt_inet.h"
44 
45 #include "oce_if.h"
46 #include "oce_user.h"
47 
48 #define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
49 
50 /* UE Status Low CSR */
51 static char *ue_status_low_desc[] = {
52         "CEV",
53         "CTX",
54         "DBUF",
55         "ERX",
56         "Host",
57         "MPU",
58         "NDMA",
59         "PTC ",
60         "RDMA ",
61         "RXF ",
62         "RXIPS ",
63         "RXULP0 ",
64         "RXULP1 ",
65         "RXULP2 ",
66         "TIM ",
67         "TPOST ",
68         "TPRE ",
69         "TXIPS ",
70         "TXULP0 ",
71         "TXULP1 ",
72         "UC ",
73         "WDMA ",
74         "TXULP2 ",
75         "HOST1 ",
76         "P0_OB_LINK ",
77         "P1_OB_LINK ",
78         "HOST_GPIO ",
79         "MBOX ",
80         "AXGMAC0",
81         "AXGMAC1",
82         "JTAG",
83         "MPU_INTPEND"
84 };
85 
86 /* UE Status High CSR */
87 static char *ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121 
122 struct oce_common_cqe_info{
123         uint8_t vtp:1;
124         uint8_t l4_cksum_pass:1;
125         uint8_t ip_cksum_pass:1;
126         uint8_t ipv6_frame:1;
127         uint8_t qnq:1;
128         uint8_t rsvd:3;
129         uint8_t num_frags;
130         uint16_t pkt_size;
131         uint16_t vtag;
132 };
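/* Software-side summary of an RX completion: both the regular RX path
 * (oce_rx) and the hardware-LRO path (oce_rx_lro) normalize their CQE
 * formats into this structure before calling oce_rx_mbuf_chain(). */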
133 
134 /* Driver entry points prototypes */
135 static int  oce_probe(device_t dev);
136 static int  oce_attach(device_t dev);
137 static int  oce_detach(device_t dev);
138 static int  oce_shutdown(device_t dev);
139 static int  oce_ioctl(if_t ifp, u_long command, caddr_t data);
140 static void oce_init(void *xsc);
141 static int  oce_multiq_start(if_t ifp, struct mbuf *m);
142 static void oce_multiq_flush(if_t ifp);
143 
144 /* Driver interrupt routines prototypes */
145 static void oce_intr(void *arg, int pending);
146 static int  oce_setup_intr(POCE_SOFTC sc);
147 static int  oce_fast_isr(void *arg);
148 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
149 			  void (*isr) (void *arg, int pending));
150 
151 /* Media callbacks prototypes */
152 static void oce_media_status(if_t ifp, struct ifmediareq *req);
153 static int  oce_media_change(if_t ifp);
154 
155 /* Transmit routines prototypes */
156 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
157 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
158 static void oce_process_tx_completion(struct oce_wq *wq);
159 static int  oce_multiq_transmit(if_t ifp, struct mbuf *m,
160 				 struct oce_wq *wq);
161 
162 /* Receive routines prototypes */
163 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
164 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
166 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
167 static uint16_t oce_rq_handler_lro(void *arg);
168 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
171 
172 /* Helper function prototypes in this file */
173 static void oce_attach_ifp(POCE_SOFTC sc);
174 static void oce_add_vlan(void *arg, if_t ifp, uint16_t vtag);
175 static void oce_del_vlan(void *arg, if_t ifp, uint16_t vtag);
176 static int  oce_vid_config(POCE_SOFTC sc);
177 static void oce_mac_addr_set(POCE_SOFTC sc);
178 static int  oce_handle_passthrough(if_t ifp, caddr_t data);
179 static void oce_local_timer(void *arg);
180 static void oce_if_deactivate(POCE_SOFTC sc);
181 static void oce_if_activate(POCE_SOFTC sc);
182 static void setup_max_queues_want(POCE_SOFTC sc);
183 static void update_queues_got(POCE_SOFTC sc);
184 static void process_link_state(POCE_SOFTC sc,
185 		 struct oce_async_cqe_link_state *acqe);
186 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
187 static void oce_get_config(POCE_SOFTC sc);
188 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
189 static void oce_read_env_variables(POCE_SOFTC sc);
190 
191 /* IP specific */
192 #if defined(INET6) || defined(INET)
193 static int  oce_init_lro(POCE_SOFTC sc);
194 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
195 #endif
196 
197 static device_method_t oce_dispatch[] = {
198 	DEVMETHOD(device_probe, oce_probe),
199 	DEVMETHOD(device_attach, oce_attach),
200 	DEVMETHOD(device_detach, oce_detach),
201 	DEVMETHOD(device_shutdown, oce_shutdown),
202 
203 	DEVMETHOD_END
204 };
205 
206 static driver_t oce_driver = {
207 	"oce",
208 	oce_dispatch,
209 	sizeof(OCE_SOFTC)
210 };
211 
212 /* global vars */
213 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
214 
215 /* Module capabilities and parameters */
216 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
217 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
218 uint32_t oce_rq_buf_size = 2048;
219 
220 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
221 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
222 
223 /* Supported devices table */
224 static uint32_t supportedDevices[] =  {
225 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
226 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
227 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
228 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
229 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
230 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
231 };
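/* Each entry packs the PCI vendor ID in the upper 16 bits and the device
 * ID in the lower 16; oce_probe() unpacks them with >> 16 and & 0xffff. */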
232 
233 DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
234 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
235     nitems(supportedDevices));
236 MODULE_DEPEND(oce, pci, 1, 1, 1);
237 MODULE_DEPEND(oce, ether, 1, 1, 1);
238 MODULE_VERSION(oce, 1);
239 
240 POCE_SOFTC softc_head = NULL;
241 POCE_SOFTC softc_tail = NULL;
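/* Singly-linked list of all attached adapters: oce_attach() appends,
 * oce_detach() unlinks. */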
242 
243 struct oce_rdma_if *oce_rdma_if = NULL;
244 
245 /*****************************************************************************
246  *			Driver entry points functions                        *
247  *****************************************************************************/
248 
249 static int
250 oce_probe(device_t dev)
251 {
252 	uint16_t vendor = 0;
253 	uint16_t device = 0;
254 	int i = 0;
255 	POCE_SOFTC sc;
256 
257 	sc = device_get_softc(dev);
258 	bzero(sc, sizeof(OCE_SOFTC));
259 	sc->dev = dev;
260 
261 	vendor = pci_get_vendor(dev);
262 	device = pci_get_device(dev);
263 
264 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
265 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
266 			if (device == (supportedDevices[i] & 0xffff)) {
267 				device_set_descf(dev,
268 				    "%s:%s", "Emulex CNA NIC function",
269 				    component_revision);
270 
271 				switch (device) {
272 				case PCI_PRODUCT_BE2:
273 					sc->flags |= OCE_FLAGS_BE2;
274 					break;
275 				case PCI_PRODUCT_BE3:
276 					sc->flags |= OCE_FLAGS_BE3;
277 					break;
278 				case PCI_PRODUCT_XE201:
279 				case PCI_PRODUCT_XE201_VF:
280 					sc->flags |= OCE_FLAGS_XE201;
281 					break;
282 				case PCI_PRODUCT_SH:
283 					sc->flags |= OCE_FLAGS_SH;
284 					break;
285 				default:
286 					return ENXIO;
287 				}
288 				return BUS_PROBE_DEFAULT;
289 			}
290 		}
291 	}
292 
293 	return ENXIO;
294 }
295 
296 static int
297 oce_attach(device_t dev)
298 {
299 	POCE_SOFTC sc;
300 	int rc = 0;
301 
302 	sc = device_get_softc(dev);
303 
304 	rc = oce_hw_pci_alloc(sc);
305 	if (rc)
306 		return rc;
307 
308 	sc->tx_ring_size = OCE_TX_RING_SIZE;
309 	sc->rx_ring_size = OCE_RX_RING_SIZE;
310 	/* receive fragment size should be multiple of 2K */
311 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
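	/* e.g. a tuned oce_rq_buf_size of, say, 9216 rounds down to an
	 * 8192-byte fragment size (9216 / 2048 = 4, 4 * 2048 = 8192) */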
312 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
313 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
314 
315 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
316 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
317 
318 	/* initialise the hardware */
319 	rc = oce_hw_init(sc);
320 	if (rc)
321 		goto pci_res_free;
322 
323 	oce_read_env_variables(sc);
324 
325 	oce_get_config(sc);
326 
327 	setup_max_queues_want(sc);
328 
329 	rc = oce_setup_intr(sc);
330 	if (rc)
331 		goto mbox_free;
332 
333 	rc = oce_queue_init_all(sc);
334 	if (rc)
335 		goto intr_free;
336 
337 	oce_attach_ifp(sc);
338 
339 #if defined(INET6) || defined(INET)
340 	rc = oce_init_lro(sc);
341 	if (rc)
342 		goto ifp_free;
343 #endif
344 
345 	rc = oce_hw_start(sc);
346 	if (rc)
347 		goto lro_free;
348 
349 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
350 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
351 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
352 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
353 
354 	rc = oce_stats_init(sc);
355 	if (rc)
356 		goto vlan_free;
357 
358 	oce_add_sysctls(sc);
359 
360 	callout_init(&sc->timer, CALLOUT_MPSAFE);
361 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
362 	if (rc)
363 		goto stats_free;
364 
365 	sc->next = NULL;
366 	if (softc_tail != NULL) {
367 	  softc_tail->next = sc;
368 	} else {
369 	  softc_head = sc;
370 	}
371 	softc_tail = sc;
372 
373 	return 0;
374 
375 stats_free:
376 	callout_drain(&sc->timer);
377 	oce_stats_free(sc);
378 vlan_free:
379 	if (sc->vlan_attach)
380 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
381 	if (sc->vlan_detach)
382 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
383 	oce_hw_intr_disable(sc);
384 lro_free:
385 #if defined(INET6) || defined(INET)
386 	oce_free_lro(sc);
387 ifp_free:
388 #endif
389 	ether_ifdetach(sc->ifp);
390 	if_free(sc->ifp);
391 	oce_queue_release_all(sc);
392 intr_free:
393 	oce_intr_free(sc);
394 mbox_free:
395 	oce_dma_free(sc, &sc->bsmbx);
396 pci_res_free:
397 	oce_hw_pci_free(sc);
398 	LOCK_DESTROY(&sc->dev_lock);
399 	LOCK_DESTROY(&sc->bmbx_lock);
400 	return rc;
401 
402 }
403 
404 static int
405 oce_detach(device_t dev)
406 {
407 	POCE_SOFTC sc = device_get_softc(dev);
408 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
409 
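        /* Unlink this softc from the global adapter list; poce_sc_tmp2
         * trails one node behind so softc_tail can be fixed up when the
         * last node is removed. */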
410         poce_sc_tmp = softc_head;
411         ppoce_sc_tmp1 = &softc_head;
412         while (poce_sc_tmp != NULL) {
413           if (poce_sc_tmp == sc) {
414             *ppoce_sc_tmp1 = sc->next;
415             if (sc->next == NULL) {
416               softc_tail = poce_sc_tmp2;
417             }
418             break;
419           }
420           poce_sc_tmp2 = poce_sc_tmp;
421           ppoce_sc_tmp1 = &poce_sc_tmp->next;
422           poce_sc_tmp = poce_sc_tmp->next;
423         }
424 
425 	LOCK(&sc->dev_lock);
426 	oce_if_deactivate(sc);
427 	UNLOCK(&sc->dev_lock);
428 
429 	callout_drain(&sc->timer);
430 
431 	if (sc->vlan_attach != NULL)
432 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
433 	if (sc->vlan_detach != NULL)
434 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
435 
436 	ether_ifdetach(sc->ifp);
437 
438 	if_free(sc->ifp);
439 
440 	oce_hw_shutdown(sc);
441 
442 	bus_generic_detach(dev);
443 
444 	return 0;
445 }
446 
447 static int
448 oce_shutdown(device_t dev)
449 {
450 	int rc;
451 
452 	rc = oce_detach(dev);
453 
454 	return rc;
455 }
456 
457 static int
458 oce_ioctl(if_t ifp, u_long command, caddr_t data)
459 {
460 	struct ifreq *ifr = (struct ifreq *)data;
461 	POCE_SOFTC sc = if_getsoftc(ifp);
462 	struct ifi2creq i2c;
463 	uint8_t	offset = 0;
464 	int rc = 0;
465 	uint32_t u;
466 
467 	switch (command) {
468 	case SIOCGIFMEDIA:
469 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
470 		break;
471 
472 	case SIOCSIFMTU:
473 		if (ifr->ifr_mtu > OCE_MAX_MTU)
474 			rc = EINVAL;
475 		else
476 			if_setmtu(ifp, ifr->ifr_mtu);
477 		break;
478 
479 	case SIOCSIFFLAGS:
480 		if (if_getflags(ifp) & IFF_UP) {
481 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
482 				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
483 				oce_init(sc);
484 			}
485 			device_printf(sc->dev, "Interface Up\n");
486 		} else {
487 			LOCK(&sc->dev_lock);
488 
489 			if_setdrvflagbits(sc->ifp, 0,
490 			    IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
491 			oce_if_deactivate(sc);
492 
493 			UNLOCK(&sc->dev_lock);
494 
495 			device_printf(sc->dev, "Interface Down\n");
496 		}
497 
498 		if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
499 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
500 				sc->promisc = TRUE;
501 		} else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
502 			if (!oce_rxf_set_promiscuous(sc, 0))
503 				sc->promisc = FALSE;
504 		}
505 
506 		break;
507 
508 	case SIOCADDMULTI:
509 	case SIOCDELMULTI:
510 		rc = oce_hw_update_multicast(sc);
511 		if (rc)
512 			device_printf(sc->dev,
513 				"Update multicast address failed\n");
514 		break;
515 
516 	case SIOCSIFCAP:
517 		u = ifr->ifr_reqcap ^ if_getcapenable(ifp);
518 
519 		if (u & IFCAP_TXCSUM) {
520 			if_togglecapenable(ifp, IFCAP_TXCSUM);
521 			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
522 
523 			if (IFCAP_TSO & if_getcapenable(ifp) &&
524 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
525 				u &= ~IFCAP_TSO;
526 				if_setcapenablebit(ifp, 0, IFCAP_TSO);
527 				if_sethwassistbits(ifp, 0, CSUM_TSO);
528 				if_printf(ifp,
529 					 "TSO disabled due to -txcsum.\n");
530 			}
531 		}
532 
533 		if (u & IFCAP_RXCSUM)
534 			if_togglecapenable(ifp, IFCAP_RXCSUM);
535 
536 		if (u & IFCAP_TSO4) {
537 			if_togglecapenable(ifp, IFCAP_TSO4);
538 
539 			if (IFCAP_TSO & if_getcapenable(ifp)) {
540 				if (IFCAP_TXCSUM & if_getcapenable(ifp))
541 					if_sethwassistbits(ifp, CSUM_TSO, 0);
542 				else {
543 					if_setcapenablebit(ifp, 0, IFCAP_TSO);
544 					if_sethwassistbits(ifp, 0, CSUM_TSO);
545 					if_printf(ifp,
546 					    "Enable txcsum first.\n");
547 					rc = EAGAIN;
548 				}
549 			} else
550 				if_sethwassistbits(ifp, 0, CSUM_TSO);
551 		}
552 
553 		if (u & IFCAP_VLAN_HWTAGGING)
554 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
555 
556 		if (u & IFCAP_VLAN_HWFILTER) {
557 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
558 			oce_vid_config(sc);
559 		}
560 #if defined(INET6) || defined(INET)
561 		if (u & IFCAP_LRO) {
562 			if_togglecapenable(ifp, IFCAP_LRO);
563 			if(sc->enable_hwlro) {
564 				if(if_getcapenable(ifp) & IFCAP_LRO) {
565 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
566 				}else {
567 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
568 				}
569 			}
570 		}
571 #endif
572 
573 		break;
574 
575 	case SIOCGI2C:
576 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
577 		if (rc)
578 			break;
579 
580 		if (i2c.dev_addr == PAGE_NUM_A0) {
581 			offset = i2c.offset;
582 		} else if (i2c.dev_addr == PAGE_NUM_A2) {
583 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
584 		} else {
585 			rc = EINVAL;
586 			break;
587 		}
588 
589 		if (i2c.len > sizeof(i2c.data) ||
590 		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
591 			rc = EINVAL;
592 			break;
593 		}
594 
595 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
596 		if (rc) {
597 			rc = -rc;
598 			break;
599 		}
600 
601 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
602 
603 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
604 		break;
605 
606 	case SIOCGPRIVATE_0:
607 		rc = priv_check(curthread, PRIV_DRIVER);
608 		if (rc != 0)
609 			break;
610 		rc = oce_handle_passthrough(ifp, data);
611 		break;
612 	default:
613 		rc = ether_ioctl(ifp, command, data);
614 		break;
615 	}
616 
617 	return rc;
618 }
619 
620 static void
621 oce_init(void *arg)
622 {
623 	POCE_SOFTC sc = arg;
624 
625 	LOCK(&sc->dev_lock);
626 
627 	if (if_getflags(sc->ifp) & IFF_UP) {
628 		oce_if_deactivate(sc);
629 		oce_if_activate(sc);
630 	}
631 
632 	UNLOCK(&sc->dev_lock);
633 
634 }
635 
636 static int
637 oce_multiq_start(if_t ifp, struct mbuf *m)
638 {
639 	POCE_SOFTC sc = if_getsoftc(ifp);
640 	struct oce_wq *wq = NULL;
641 	int queue_index = 0;
642 	int status = 0;
643 
644 	if (!sc->link_status)
645 		return ENXIO;
646 
647 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
648 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
649 
650 	wq = sc->wq[queue_index];
651 
652 	LOCK(&wq->tx_lock);
653 	status = oce_multiq_transmit(ifp, m, wq);
654 	UNLOCK(&wq->tx_lock);
655 
656 	return status;
657 
658 }
659 
660 static void
661 oce_multiq_flush(if_t ifp)
662 {
663 	POCE_SOFTC sc = if_getsoftc(ifp);
664 	struct mbuf     *m;
665 	int i = 0;
666 
667 	for (i = 0; i < sc->nwqs; i++) {
668 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
669 			m_freem(m);
670 	}
671 	if_qflush(ifp);
672 }
673 
674 /*****************************************************************************
675  *                   Driver interrupt routines functions                     *
676  *****************************************************************************/
677 
678 static void
679 oce_intr(void *arg, int pending)
680 {
681 
682 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
683 	POCE_SOFTC sc = ii->sc;
684 	struct oce_eq *eq = ii->eq;
685 	struct oce_eqe *eqe;
686 	struct oce_cq *cq = NULL;
687 	int i, num_eqes = 0;
688 
689 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
690 				 BUS_DMASYNC_POSTWRITE);
691 	do {
692 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
693 		if (eqe->evnt == 0)
694 			break;
695 		eqe->evnt = 0;
696 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
697 					BUS_DMASYNC_POSTWRITE);
698 		RING_GET(eq->ring, 1);
699 		num_eqes++;
700 
701 	} while (TRUE);
702 
703 	if (!num_eqes)
704 		goto eq_arm; /* Spurious */
705 
706 	/* Clear EQ entries, but don't arm */
707 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
708 
709 	/* Process TX, RX and MCC, but don't arm the CQ */
710 	for (i = 0; i < eq->cq_valid; i++) {
711 		cq = eq->cq[i];
712 		(*cq->cq_handler)(cq->cb_arg);
713 	}
714 
715 	/* Arm all cqs connected to this EQ */
716 	for (i = 0; i < eq->cq_valid; i++) {
717 		cq = eq->cq[i];
718 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
719 	}
720 
721 eq_arm:
722 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
723 
724 	return;
725 }
726 
727 static int
728 oce_setup_intr(POCE_SOFTC sc)
729 {
730 	int rc = 0, use_intx = 0;
731 	int vector = 0, req_vectors = 0;
732 	int tot_req_vectors, tot_vectors;
733 
734 	if (is_rss_enabled(sc))
735 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
736 	else
737 		req_vectors = 1;
738 
739 	tot_req_vectors = req_vectors;
740 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
741 	  if (req_vectors > 1) {
742 	    tot_req_vectors += OCE_RDMA_VECTORS;
743 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
744 	  }
745 	}
746 
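	/* Try MSI-X first, requesting the NIC vectors plus any RDMA vectors;
	 * if fewer are granted the NIC/RDMA split is recomputed below, and on
	 * failure the driver falls back to a single legacy INTx vector. */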
747         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
748 		sc->intr_count = req_vectors;
749                 tot_vectors = tot_req_vectors;
750 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
751 		if (rc != 0) {
752 			use_intx = 1;
753 			pci_release_msi(sc->dev);
754 		} else {
755 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
756 		    if (tot_vectors < tot_req_vectors) {
757 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
758 			sc->roce_intr_count = (tot_vectors / 2);
759 		      }
760 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
761 		    }
762 		  } else {
763 		    sc->intr_count = tot_vectors;
764 		  }
765     		  sc->flags |= OCE_FLAGS_USING_MSIX;
766 		}
767 	} else
768 		use_intx = 1;
769 
770 	if (use_intx)
771 		sc->intr_count = 1;
772 
773 	/* Scale number of queues based on intr we got */
774 	update_queues_got(sc);
775 
776 	if (use_intx) {
777 		device_printf(sc->dev, "Using legacy interrupt\n");
778 		rc = oce_alloc_intr(sc, vector, oce_intr);
779 		if (rc)
780 			goto error;
781 	} else {
782 		for (; vector < sc->intr_count; vector++) {
783 			rc = oce_alloc_intr(sc, vector, oce_intr);
784 			if (rc)
785 				goto error;
786 		}
787 	}
788 
789 	return 0;
790 error:
791 	oce_intr_free(sc);
792 	return rc;
793 }
794 
795 static int
796 oce_fast_isr(void *arg)
797 {
798 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
799 	POCE_SOFTC sc = ii->sc;
800 
801 	if (ii->eq == NULL)
802 		return FILTER_STRAY;
803 
804 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
805 
806 	taskqueue_enqueue(ii->tq, &ii->task);
807 
808  	ii->eq->intr++;
809 
810 	return FILTER_HANDLED;
811 }
812 
813 static int
814 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
815 {
816 	POCE_INTR_INFO ii;
817 	int rc = 0, rr;
818 
819 	if (vector >= OCE_MAX_EQ)
820 		return (EINVAL);
821 
822 	ii = &sc->intrs[vector];
823 
824 	/* Set the resource ID for the interrupt.
825 	 * For MSI-X the resource ID is vector + 1;
826 	 * for INTx it is 0.
827 	 */
828 	if (sc->flags & OCE_FLAGS_USING_MSIX)
829 		rr = vector + 1;
830 	else
831 		rr = 0;
832 	ii->intr_res = bus_alloc_resource_any(sc->dev,
833 					      SYS_RES_IRQ,
834 					      &rr, RF_ACTIVE|RF_SHAREABLE);
835 	ii->irq_rr = rr;
836 	if (ii->intr_res == NULL) {
837 		device_printf(sc->dev,
838 			  "Could not allocate interrupt\n");
839 		rc = ENXIO;
840 		return rc;
841 	}
842 
843 	TASK_INIT(&ii->task, 0, isr, ii);
844 	ii->vector = vector;
845 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
846 	ii->tq = taskqueue_create_fast(ii->task_name,
847 			M_NOWAIT,
848 			taskqueue_thread_enqueue,
849 			&ii->tq);
850 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
851 			device_get_nameunit(sc->dev));
852 
853 	ii->sc = sc;
854 	rc = bus_setup_intr(sc->dev,
855 			ii->intr_res,
856 			INTR_TYPE_NET,
857 			oce_fast_isr, NULL, ii, &ii->tag);
858 	return rc;
859 
860 }
861 
862 void
863 oce_intr_free(POCE_SOFTC sc)
864 {
865 	int i = 0;
866 
867 	for (i = 0; i < sc->intr_count; i++) {
868 
869 		if (sc->intrs[i].tag != NULL)
870 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
871 						sc->intrs[i].tag);
872 		if (sc->intrs[i].tq != NULL)
873 			taskqueue_free(sc->intrs[i].tq);
874 
875 		if (sc->intrs[i].intr_res != NULL)
876 			bus_release_resource(sc->dev, SYS_RES_IRQ,
877 						sc->intrs[i].irq_rr,
878 						sc->intrs[i].intr_res);
879 		sc->intrs[i].tag = NULL;
880 		sc->intrs[i].intr_res = NULL;
881 	}
882 
883 	if (sc->flags & OCE_FLAGS_USING_MSIX)
884 		pci_release_msi(sc->dev);
885 
886 }
887 
888 /******************************************************************************
889 *			  Media callbacks functions 			      *
890 ******************************************************************************/
891 
892 static void
893 oce_media_status(if_t ifp, struct ifmediareq *req)
894 {
895 	POCE_SOFTC sc = (POCE_SOFTC) if_getsoftc(ifp);
896 
897 	req->ifm_status = IFM_AVALID;
898 	req->ifm_active = IFM_ETHER;
899 
900 	if (sc->link_status == 1)
901 		req->ifm_status |= IFM_ACTIVE;
902 	else
903 		return;
904 
905 	switch (sc->link_speed) {
906 	case 1: /* 10 Mbps */
907 		req->ifm_active |= IFM_10_T | IFM_FDX;
908 		sc->speed = 10;
909 		break;
910 	case 2: /* 100 Mbps */
911 		req->ifm_active |= IFM_100_TX | IFM_FDX;
912 		sc->speed = 100;
913 		break;
914 	case 3: /* 1 Gbps */
915 		req->ifm_active |= IFM_1000_T | IFM_FDX;
916 		sc->speed = 1000;
917 		break;
918 	case 4: /* 10 Gbps */
919 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
920 		sc->speed = 10000;
921 		break;
922 	case 5: /* 20 Gbps */
923 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
924 		sc->speed = 20000;
925 		break;
926 	case 6: /* 25 Gbps */
927 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
928 		sc->speed = 25000;
929 		break;
930 	case 7: /* 40 Gbps */
931 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
932 		sc->speed = 40000;
933 		break;
934 	default:
935 		sc->speed = 0;
936 		break;
937 	}
938 
939 	return;
940 }
941 
942 int
943 oce_media_change(if_t ifp)
944 {
945 	return 0;
946 }
947 
948 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
949 				struct mbuf *m, boolean_t *os2bmc,
950 				struct mbuf **m_new)
951 {
952 	struct ether_header *eh = NULL;
953 
954 	eh = mtod(m, struct ether_header *);
955 
956 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
957 		*os2bmc = FALSE;
958 		goto done;
959 	}
960 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
961 		goto done;
962 
963 	if (is_mc_allowed_on_bmc(sc, eh) ||
964 	    is_bc_allowed_on_bmc(sc, eh) ||
965 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
966 		*os2bmc = TRUE;
967 		goto done;
968 	}
969 
970 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
971 		struct ip6_hdr *ip6 = (struct ip6_hdr *)(eh + 1);
972 		uint8_t nexthdr = ip6->ip6_nxt;
973 		if (nexthdr == IPPROTO_ICMPV6) {
974 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
975 			switch (icmp6->icmp6_type) {
976 			case ND_ROUTER_ADVERT:
977 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
978 				goto done;
979 			case ND_NEIGHBOR_ADVERT:
980 				*os2bmc = is_ipv6_na_filt_enabled(sc);
981 				goto done;
982 			default:
983 				break;
984 			}
985 		}
986 	}
987 
988 	if (eh->ether_type == htons(ETHERTYPE_IP) && ((struct ip *)(eh + 1))->ip_p == IPPROTO_UDP) {
989 		struct ip *ip = (struct ip *)(eh + 1);
990 		int iphlen = ip->ip_hl << 2;
991 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
992 		switch (uh->uh_dport) {
993 		case DHCP_CLIENT_PORT:
994 			*os2bmc = is_dhcp_client_filt_enabled(sc);
995 			goto done;
996 		case DHCP_SERVER_PORT:
997 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
998 			goto done;
999 		case NET_BIOS_PORT1:
1000 		case NET_BIOS_PORT2:
1001 			*os2bmc = is_nbios_filt_enabled(sc);
1002 			goto done;
1003 		case DHCPV6_RAS_PORT:
1004 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1005 			goto done;
1006 		default:
1007 			break;
1008 		}
1009 	}
1010 done:
1011 	if (*os2bmc) {
1012 		*m_new = m_dup(m, M_NOWAIT);
1013 		if (!*m_new) {
1014 			*os2bmc = FALSE;
1015 			return;
1016 		}
1017 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1018 	}
1019 }
1020 
1021 /*****************************************************************************
1022  *			  Transmit routines functions			     *
1023  *****************************************************************************/
1024 
1025 static int
1026 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1027 {
1028 	int rc = 0, i, retry_cnt = 0;
1029 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1030 	struct mbuf *m, *m_temp, *m_new = NULL;
1031 	struct oce_wq *wq = sc->wq[wq_index];
1032 	struct oce_packet_desc *pd;
1033 	struct oce_nic_hdr_wqe *nichdr;
1034 	struct oce_nic_frag_wqe *nicfrag;
1035 	struct ether_header *eh = NULL;
1036 	int num_wqes;
1037 	uint32_t reg_value;
1038 	boolean_t complete = TRUE;
1039 	boolean_t os2bmc = FALSE;
1040 
1041 	m = *mpp;
1042 	if (!m)
1043 		return EINVAL;
1044 
1045 	if (!(m->m_flags & M_PKTHDR)) {
1046 		rc = ENXIO;
1047 		goto free_ret;
1048 	}
1049 
1050 	/* Don't allow non-TSO packets longer than MTU */
1051 	if (!is_tso_pkt(m)) {
1052 		eh = mtod(m, struct ether_header *);
1053 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1054 			 goto free_ret;
1055 	}
1056 
1057 	if(oce_tx_asic_stall_verify(sc, m)) {
1058 		m = oce_insert_vlan_tag(sc, m, &complete);
1059 		if(!m) {
1060 			device_printf(sc->dev, "Insertion unsuccessful\n");
1061 			return 0;
1062 		}
1063 	}
1064 
1065 	/* The Lancer and SH ASICs have a bug wherein packets of 32 bytes or
1066 	 * less may cause a transmit stall on that port. The workaround is to
1067 	 * pad such short packets (<= 32 bytes) to a 36-byte length.
1068 	 */
1069 	if(IS_SH(sc) || IS_XE201(sc) ) {
1070 		if(m->m_pkthdr.len <= 32) {
1071 			char buf[36];
1072 			bzero((void *)buf, 36);
1073 			m_append(m, (36 - m->m_pkthdr.len), buf);
1074 		}
1075 	}
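	/* e.g. a 28-byte runt gets 36 - 28 = 8 zero bytes appended */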
1076 
1077 tx_start:
1078 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1079 		/* consolidate packet buffers for TSO/LSO segment offload */
1080 #if defined(INET6) || defined(INET)
1081 		m = oce_tso_setup(sc, mpp);
1082 #else
1083 		m = NULL;
1084 #endif
1085 		if (m == NULL) {
1086 			rc = ENXIO;
1087 			goto free_ret;
1088 		}
1089 	}
1090 
1091 	pd = &wq->pckts[wq->pkt_desc_head];
1092 
1093 retry:
1094 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1095 				     pd->map,
1096 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1097 	if (rc == 0) {
1098 		num_wqes = pd->nsegs + 1;
1099 		if (IS_BE(sc) || IS_SH(sc)) {
1100 			/* Dummy WQE to make the count even; BE and SH need this. */
1101 			if (num_wqes & 1)
1102 				num_wqes++;
1103 		}
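		/* e.g. 2 frags -> 3 WQEs (header + 2) -> padded to 4 on BE/SH */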
1104 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1105 			bus_dmamap_unload(wq->tag, pd->map);
1106 			return EBUSY;
1107 		}
1108 		atomic_store_rel_int(&wq->pkt_desc_head,
1109 				     (wq->pkt_desc_head + 1) %
1110 				      OCE_WQ_PACKET_ARRAY_SIZE);
1111 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1112 		pd->mbuf = m;
1113 
1114 		nichdr =
1115 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1116 		nichdr->u0.dw[0] = 0;
1117 		nichdr->u0.dw[1] = 0;
1118 		nichdr->u0.dw[2] = 0;
1119 		nichdr->u0.dw[3] = 0;
1120 
1121 		nichdr->u0.s.complete = complete;
1122 		nichdr->u0.s.mgmt = os2bmc;
1123 		nichdr->u0.s.event = 1;
1124 		nichdr->u0.s.crc = 1;
1125 		nichdr->u0.s.forward = 0;
1126 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1127 		nichdr->u0.s.udpcs =
1128 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1129 		nichdr->u0.s.tcpcs =
1130 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1131 		nichdr->u0.s.num_wqe = num_wqes;
1132 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1133 
1134 		if (m->m_flags & M_VLANTAG) {
1135 			nichdr->u0.s.vlan = 1; /* VLAN present */
1136 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1137 		}
1138 
1139 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1140 			if (m->m_pkthdr.tso_segsz) {
1141 				nichdr->u0.s.lso = 1;
1142 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1143 			}
1144 			if (!IS_BE(sc) || !IS_SH(sc))
1145 				nichdr->u0.s.ipcs = 1;
1146 		}
1147 
1148 		RING_PUT(wq->ring, 1);
1149 		atomic_add_int(&wq->ring->num_used, 1);
1150 
1151 		for (i = 0; i < pd->nsegs; i++) {
1152 			nicfrag =
1153 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1154 						      struct oce_nic_frag_wqe);
1155 			nicfrag->u0.s.rsvd0 = 0;
1156 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1157 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1158 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1159 			pd->wqe_idx = wq->ring->pidx;
1160 			RING_PUT(wq->ring, 1);
1161 			atomic_add_int(&wq->ring->num_used, 1);
1162 		}
1163 		if (num_wqes > (pd->nsegs + 1)) {
1164 			nicfrag =
1165 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1166 						      struct oce_nic_frag_wqe);
1167 			nicfrag->u0.dw[0] = 0;
1168 			nicfrag->u0.dw[1] = 0;
1169 			nicfrag->u0.dw[2] = 0;
1170 			nicfrag->u0.dw[3] = 0;
1171 			pd->wqe_idx = wq->ring->pidx;
1172 			RING_PUT(wq->ring, 1);
1173 			atomic_add_int(&wq->ring->num_used, 1);
1174 			pd->nsegs++;
1175 		}
1176 
1177 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1178 		wq->tx_stats.tx_reqs++;
1179 		wq->tx_stats.tx_wrbs += num_wqes;
1180 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1181 		wq->tx_stats.tx_pkts++;
1182 
1183 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1184 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1185 		reg_value = (num_wqes << 16) | wq->wq_id;
1186 
1187 		/* Check whether a copy must also go to the BMC; this does
1188 		   nothing if os2bmc is disabled or the pkt is already tagged
1189 		 */
1190 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1191 
1192 		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1193 		if (m->m_flags & M_MCAST)
1194 			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
1195 		ETHER_BPF_MTAP(sc->ifp, m);
1196 
1197 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1198 
1199 	} else if (rc == EFBIG)	{
1200 		if (retry_cnt == 0) {
1201 			m_temp = m_defrag(m, M_NOWAIT);
1202 			if (m_temp == NULL)
1203 				goto free_ret;
1204 			m = m_temp;
1205 			*mpp = m_temp;
1206 			retry_cnt = retry_cnt + 1;
1207 			goto retry;
1208 		} else
1209 			goto free_ret;
1210 	} else if (rc == ENOMEM)
1211 		return rc;
1212 	else
1213 		goto free_ret;
1214 
1215 	if (os2bmc) {
1216 		m = m_new;
1217 		goto tx_start;
1218 	}
1219 
1220 	return 0;
1221 
1222 free_ret:
1223 	m_freem(*mpp);
1224 	*mpp = NULL;
1225 	return rc;
1226 }
1227 
1228 static void
1229 oce_process_tx_completion(struct oce_wq *wq)
1230 {
1231 	struct oce_packet_desc *pd;
1232 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1233 	struct mbuf *m;
1234 
1235 	pd = &wq->pckts[wq->pkt_desc_tail];
1236 	atomic_store_rel_int(&wq->pkt_desc_tail,
1237 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1238 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1239 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1240 	bus_dmamap_unload(wq->tag, pd->map);
1241 
1242 	m = pd->mbuf;
1243 	m_freem(m);
1244 	pd->mbuf = NULL;
1245 
1246 	if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
1247 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1248 			if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
1249 			oce_tx_restart(sc, wq);
1250 		}
1251 	}
1252 }
1253 
1254 static void
1255 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1256 {
1257 
1258 	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1259 		return;
1260 
1261 	if (!drbr_empty(sc->ifp, wq->br))
1262 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1263 
1264 }
1265 
1266 #if defined(INET6) || defined(INET)
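/* Pull the Ethernet/IP/TCP headers of a TSO packet into one contiguous,
 * writable mbuf so they can be parsed (and later patched) safely;
 * returns NULL for anything that is not TCP. */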
1267 static struct mbuf *
1268 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1269 {
1270 	struct mbuf *m;
1271 #ifdef INET
1272 	struct ip *ip;
1273 #endif
1274 #ifdef INET6
1275 	struct ip6_hdr *ip6;
1276 #endif
1277 	struct ether_vlan_header *eh;
1278 	struct tcphdr *th;
1279 	uint16_t etype;
1280 	int total_len = 0, ehdrlen = 0;
1281 
1282 	m = *mpp;
1283 
1284 	if (M_WRITABLE(m) == 0) {
1285 		m = m_dup(*mpp, M_NOWAIT);
1286 		if (!m)
1287 			return NULL;
1288 		m_freem(*mpp);
1289 		*mpp = m;
1290 	}
1291 
1292 	eh = mtod(m, struct ether_vlan_header *);
1293 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1294 		etype = ntohs(eh->evl_proto);
1295 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1296 	} else {
1297 		etype = ntohs(eh->evl_encap_proto);
1298 		ehdrlen = ETHER_HDR_LEN;
1299 	}
1300 
1301 	switch (etype) {
1302 #ifdef INET
1303 	case ETHERTYPE_IP:
1304 		ip = (struct ip *)(m->m_data + ehdrlen);
1305 		if (ip->ip_p != IPPROTO_TCP)
1306 			return NULL;
1307 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1308 
1309 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1310 		break;
1311 #endif
1312 #ifdef INET6
1313 	case ETHERTYPE_IPV6:
1314 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1315 		if (ip6->ip6_nxt != IPPROTO_TCP)
1316 			return NULL;
1317 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1318 
1319 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1320 		break;
1321 #endif
1322 	default:
1323 		return NULL;
1324 	}
1325 
1326 	m = m_pullup(m, total_len);
1327 	*mpp = m;
1328 	return m;
1329 }
1330 #endif /* INET6 || INET */
1331 
1332 void
1333 oce_tx_task(void *arg, int npending)
1334 {
1335 	struct oce_wq *wq = arg;
1336 	POCE_SOFTC sc = wq->parent;
1337 	if_t ifp = sc->ifp;
1338 	int rc = 0;
1339 
1340 	LOCK(&wq->tx_lock);
1341 	rc = oce_multiq_transmit(ifp, NULL, wq);
1342 	if (rc) {
1343 		device_printf(sc->dev,
1344 				"TX[%d] restart failed\n", wq->queue_index);
1345 	}
1346 	UNLOCK(&wq->tx_lock);
1347 }
1348 
1349 void
1350 oce_start(if_t ifp)
1351 {
1352 	POCE_SOFTC sc = if_getsoftc(ifp);
1353 	struct mbuf *m;
1354 	int rc = 0;
1355 	int def_q = 0; /* Default tx queue is 0 */
1356 
1357 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1358 			IFF_DRV_RUNNING)
1359 		return;
1360 
1361 	if (!sc->link_status)
1362 		return;
1363 
1364 	while (true) {
1365 		m = if_dequeue(sc->ifp);
1366 		if (m == NULL)
1367 			break;
1368 
1369 		LOCK(&sc->wq[def_q]->tx_lock);
1370 		rc = oce_tx(sc, &m, def_q);
1371 		UNLOCK(&sc->wq[def_q]->tx_lock);
1372 		if (rc) {
1373 			if (m != NULL) {
1374 				sc->wq[def_q]->tx_stats.tx_stops ++;
1375 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1376 				if_sendq_prepend(ifp, m);
1377 				m = NULL;
1378 			}
1379 			break;
1380 		}
1381 	}
1382 }
1383 
1384 /* Handle the Completion Queue for transmit */
1385 uint16_t
1386 oce_wq_handler(void *arg)
1387 {
1388 	struct oce_wq *wq = (struct oce_wq *)arg;
1389 	POCE_SOFTC sc = wq->parent;
1390 	struct oce_cq *cq = wq->cq;
1391 	struct oce_nic_tx_cqe *cqe;
1392 	int num_cqes = 0;
1393 
1394 	LOCK(&wq->tx_compl_lock);
1395 	bus_dmamap_sync(cq->ring->dma.tag,
1396 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1397 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1398 	while (cqe->u0.dw[3]) {
1399 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1400 
1401 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1402 		if (wq->ring->cidx >= wq->ring->num_items)
1403 			wq->ring->cidx -= wq->ring->num_items;
1404 
1405 		oce_process_tx_completion(wq);
1406 		wq->tx_stats.tx_compl++;
1407 		cqe->u0.dw[3] = 0;
1408 		RING_GET(cq->ring, 1);
1409 		bus_dmamap_sync(cq->ring->dma.tag,
1410 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1411 		cqe =
1412 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1413 		num_cqes++;
1414 	}
1415 
1416 	if (num_cqes)
1417 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1418 
1419 	UNLOCK(&wq->tx_compl_lock);
1420 	return num_cqes;
1421 }
1422 
1423 static int
1424 oce_multiq_transmit(if_t ifp, struct mbuf *m, struct oce_wq *wq)
1425 {
1426 	POCE_SOFTC sc = if_getsoftc(ifp);
1427 	int status = 0, queue_index = 0;
1428 	struct mbuf *next = NULL;
1429 	struct buf_ring *br = NULL;
1430 
1431 	br  = wq->br;
1432 	queue_index = wq->queue_index;
1433 
1434 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1435 		IFF_DRV_RUNNING) {
1436 		if (m != NULL)
1437 			status = drbr_enqueue(ifp, br, m);
1438 		return status;
1439 	}
1440 
1441 	if (m != NULL) {
1442 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1443 			return status;
1444 	}
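	/* Peek-then-advance: when oce_tx() fails it may have consumed the
	 * mbuf (next == NULL), in which case we advance past it; otherwise
	 * the mbuf is put back and the queue is marked OACTIVE. */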
1445 	while ((next = drbr_peek(ifp, br)) != NULL) {
1446 		if (oce_tx(sc, &next, queue_index)) {
1447 			if (next == NULL) {
1448 				drbr_advance(ifp, br);
1449 			} else {
1450 				drbr_putback(ifp, br, next);
1451 				wq->tx_stats.tx_stops ++;
1452 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1453 			}
1454 			break;
1455 		}
1456 		drbr_advance(ifp, br);
1457 	}
1458 
1459 	return 0;
1460 }
1461 
1462 /*****************************************************************************
1463  *			    Receive  routines functions 		     *
1464  *****************************************************************************/
1465 
1466 static void
1467 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1468 {
1469 	uint32_t *p;
1470         struct ether_header *eh = NULL;
1471         struct tcphdr *tcp_hdr = NULL;
1472         struct ip *ip4_hdr = NULL;
1473         struct ip6_hdr *ip6 = NULL;
1474         uint32_t payload_len = 0;
1475 
1476         eh = mtod(m, struct ether_header *);
1477         /* correct IP header */
1478         if(!cqe2->ipv6_frame) {
1479 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1480                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1481                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1482                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1483         }else {
1484         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1485                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1486                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1487                                                 - sizeof(struct ip6_hdr);
1488                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1489                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1490         }
1491 
1492         /* correct tcp header */
1493         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1494         if(cqe2->push) {
1495 		tcp_set_flags(tcp_hdr, tcp_get_flags(tcp_hdr) | TH_PUSH);
1496         }
1497         tcp_hdr->th_win = htons(cqe2->tcp_window);
1498         tcp_hdr->th_sum = 0xffff;
1499         if(cqe2->ts_opt) {
1500                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1501                 *p = cqe1->tcp_timestamp_val;
1502                 *(p+1) = cqe1->tcp_timestamp_ecr;
1503         }
1504 
1505 	return;
1506 }
1507 
1508 static void
1509 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1510 {
1511 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1512         uint32_t i = 0, frag_len = 0;
1513 	uint32_t len = cqe_info->pkt_size;
1514         struct oce_packet_desc *pd;
1515         struct mbuf *tail = NULL;
1516 
1517         for (i = 0; i < cqe_info->num_frags; i++) {
1518                 if (rq->ring->cidx == rq->ring->pidx) {
1519                         device_printf(sc->dev,
1520                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1521                         return;
1522                 }
1523                 pd = &rq->pckts[rq->ring->cidx];
1524 
1525                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1526                 bus_dmamap_unload(rq->tag, pd->map);
1527 		RING_GET(rq->ring, 1);
1528                 rq->pending--;
1529 
1530                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1531                 pd->mbuf->m_len = frag_len;
1532 
1533                 if (tail != NULL) {
1534                         /* additional fragments */
1535                         pd->mbuf->m_flags &= ~M_PKTHDR;
1536                         tail->m_next = pd->mbuf;
1537 			if(rq->islro)
1538                         	tail->m_nextpkt = NULL;
1539                         tail = pd->mbuf;
1540                 } else {
1541                         /* first fragment, fill out much of the packet header */
1542                         pd->mbuf->m_pkthdr.len = len;
1543 			if(rq->islro)
1544                         	pd->mbuf->m_nextpkt = NULL;
1545                         pd->mbuf->m_pkthdr.csum_flags = 0;
1546                         if (IF_CSUM_ENABLED(sc)) {
1547                                 if (cqe_info->l4_cksum_pass) {
1548                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1549                                                 pd->mbuf->m_pkthdr.csum_flags |=
1550                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1551                                         }else { /* IPV6 frame */
1552 						if(rq->islro) {
1553                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1554                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1555 						}
1556                                         }
1557                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1558                                 }
1559                                 if (cqe_info->ip_cksum_pass) {
1560                                         pd->mbuf->m_pkthdr.csum_flags |=
1561                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1562                                 }
1563                         }
1564                         *m = tail = pd->mbuf;
1565                }
1566                 pd->mbuf = NULL;
1567                 len -= frag_len;
1568         }
1569 
1570         return;
1571 }
1572 
1573 static void
1574 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1575 {
1576         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1577         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1578         struct mbuf *m = NULL;
1579 	struct oce_common_cqe_info cq_info;
1580 
1581 	/* parse cqe */
1582         if(cqe2 == NULL) {
1583                 cq_info.pkt_size =  cqe->pkt_size;
1584                 cq_info.vtag = cqe->vlan_tag;
1585                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1586                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1587                 cq_info.ipv6_frame = cqe->ipv6_frame;
1588                 cq_info.vtp = cqe->vtp;
1589                 cq_info.qnq = cqe->qnq;
1590         }else {
1591                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1592                 cq_info.pkt_size =  cqe2->coalesced_size;
1593                 cq_info.vtag = cqe2->vlan_tag;
1594                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1595                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1596                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1597                 cq_info.vtp = cqe2->vtp;
1598                 cq_info.qnq = cqe1->qnq;
1599         }
1600 
1601 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1602 
1603         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1604         if(cq_info.pkt_size % rq->cfg.frag_size)
1605                 cq_info.num_frags++;
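        /* ceiling division: e.g. a 6000-byte coalesced packet with
         * 2048-byte frags needs 3 fragments (6000 / 2048 = 2, plus one
         * for the 1904-byte remainder) */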
1606 
1607 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1608 
1609 	if (m) {
1610 		if(cqe2) {
1611 			//assert(cqe2->valid != 0);
1612 
1613 			//assert(cqe2->cqe_type != 2);
1614 			oce_correct_header(m, cqe1, cqe2);
1615 		}
1616 
1617 		m->m_pkthdr.rcvif = sc->ifp;
1618 		if (rq->queue_index)
1619 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1620 		else
1621 			m->m_pkthdr.flowid = rq->queue_index;
1622 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1623 
1624 		/* This determines if the vlan tag is valid */
1625 		if (cq_info.vtp) {
1626 			if (sc->function_mode & FNM_FLEX10_MODE) {
1627 				/* FLEX10. If QnQ is not set, neglect VLAN */
1628 				if (cq_info.qnq) {
1629 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1630 					m->m_flags |= M_VLANTAG;
1631 				}
1632 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1633 				/* In UMC mode the pvid is generally stripped
1634 				   by hw, but in some cases frames still come
1635 				   with it. So if pvid == vlan, ignore the tag.
1636 				 */
1637 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1638 				m->m_flags |= M_VLANTAG;
1639 			}
1640 		}
1641 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1642 
1643 		if_input(sc->ifp, m);
1644 
1645 		/* Update rx stats per queue */
1646 		rq->rx_stats.rx_pkts++;
1647 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1648 		rq->rx_stats.rx_frags += cq_info.num_frags;
1649 		rq->rx_stats.rx_ucast_pkts++;
1650 	}
1651         return;
1652 }
1653 
1654 static void
1655 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1656 {
1657 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1658 	int len;
1659 	struct mbuf *m = NULL;
1660 	struct oce_common_cqe_info cq_info;
1661 	uint16_t vtag = 0;
1662 
1663 	/* Is it a flush compl that has no data */
1664 	if(!cqe->u0.s.num_fragments)
1665 		goto exit;
1666 
1667 	len = cqe->u0.s.pkt_size;
1668 	if (!len) {
1669 		/*partial DMA workaround for Lancer*/
1670 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1671 		goto exit;
1672 	}
1673 
1674 	if (!oce_cqe_portid_valid(sc, cqe)) {
1675 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1676 		goto exit;
1677 	}
1678 
1679 	 /* Get vlan_tag value */
1680 	if(IS_BE(sc) || IS_SH(sc))
1681 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1682 	else
1683 		vtag = cqe->u0.s.vlan_tag;
1684 
1685 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1686 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1687 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1688 	cq_info.num_frags = cqe->u0.s.num_fragments;
1689 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1690 
1691 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1692 
1693 	if (m) {
1694 		m->m_pkthdr.rcvif = sc->ifp;
1695 		if (rq->queue_index)
1696 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1697 		else
1698 			m->m_pkthdr.flowid = rq->queue_index;
1699 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1700 
1701 		/* This determines if the vlan tag is valid */
1702 		if (oce_cqe_vtp_valid(sc, cqe)) {
1703 			if (sc->function_mode & FNM_FLEX10_MODE) {
1704 				/* FLEX10. If QnQ is not set, neglect VLAN */
1705 				if (cqe->u0.s.qnq) {
1706 					m->m_pkthdr.ether_vtag = vtag;
1707 					m->m_flags |= M_VLANTAG;
1708 				}
1709 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1710 				/* In UMC mode the pvid is generally stripped
1711 				   by hw, but in some cases frames still come
1712 				   with it. So if pvid == vlan, ignore the tag.
1713 				*/
1714 				m->m_pkthdr.ether_vtag = vtag;
1715 				m->m_flags |= M_VLANTAG;
1716 			}
1717 		}
1718 
1719 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1720 #if defined(INET6) || defined(INET)
1721 		/* Try to queue to LRO */
1722 		if (IF_LRO_ENABLED(sc) &&
1723 		    (cqe->u0.s.ip_cksum_pass) &&
1724 		    (cqe->u0.s.l4_cksum_pass) &&
1725 		    (!cqe->u0.s.ip_ver)       &&
1726 		    (rq->lro.lro_cnt != 0)) {
1727 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1728 				rq->lro_pkts_queued ++;
1729 				goto post_done;
1730 			}
1731 			/* If LRO posting fails then try to post to STACK */
1732 		}
1733 #endif
1734 
1735 		if_input(sc->ifp, m);
1736 #if defined(INET6) || defined(INET)
1737 post_done:
1738 #endif
1739 		/* Update rx stats per queue */
1740 		rq->rx_stats.rx_pkts++;
1741 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1742 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1743 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1744 			rq->rx_stats.rx_mcast_pkts++;
1745 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1746 			rq->rx_stats.rx_ucast_pkts++;
1747 	}
1748 exit:
1749 	return;
1750 }
1751 
1752 void
1753 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1754 {
1755 	uint32_t i = 0;
1756 	struct oce_packet_desc *pd;
1757 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1758 
1759 	for (i = 0; i < num_frags; i++) {
1760                 if (rq->ring->cidx == rq->ring->pidx) {
1761                         device_printf(sc->dev,
1762                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1763                         return;
1764                 }
1765                 pd = &rq->pckts[rq->ring->cidx];
1766                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1767                 bus_dmamap_unload(rq->tag, pd->map);
1768                 if (pd->mbuf != NULL) {
1769                         m_freem(pd->mbuf);
1770                         pd->mbuf = NULL;
1771                 }
1772 
1773 		RING_GET(rq->ring, 1);
1774                 rq->pending--;
1775 	}
1776 }
1777 
1778 static int
1779 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1780 {
1781 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1782 	int vtp = 0;
1783 
1784 	if (sc->be3_native) {
1785 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1786 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1787 	} else
1788 		vtp = cqe->u0.s.vlan_tag_present;
1789 
1790 	return vtp;
1791 
1792 }
1793 
1794 static int
1795 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1796 {
1797 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1798 	int port_id = 0;
1799 
1800 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1801 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1802 		port_id =  cqe_v1->u0.s.port;
1803 		if (sc->port_id != port_id)
1804 			return 0;
1805 	} else
1806 		; /* For BE3 legacy and Lancer this is a no-op */
1807 
1808 	return 1;
1809 
1810 }
1811 
1812 #if defined(INET6) || defined(INET)
1813 void
1814 oce_rx_flush_lro(struct oce_rq *rq)
1815 {
1816 	struct lro_ctrl	*lro = &rq->lro;
1817 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1818 
1819 	if (!IF_LRO_ENABLED(sc))
1820 		return;
1821 
1822 	tcp_lro_flush_all(lro);
1823 	rq->lro_pkts_queued = 0;
1824 
1825 	return;
1826 }
1827 
1828 static int
1829 oce_init_lro(POCE_SOFTC sc)
1830 {
1831 	struct lro_ctrl *lro = NULL;
1832 	int i = 0, rc = 0;
1833 
1834 	for (i = 0; i < sc->nrqs; i++) {
1835 		lro = &sc->rq[i]->lro;
1836 		rc = tcp_lro_init(lro);
1837 		if (rc != 0) {
1838 			device_printf(sc->dev, "LRO init failed\n");
1839 			return rc;
1840 		}
1841 		lro->ifp = sc->ifp;
1842 	}
1843 
1844 	return rc;
1845 }
1846 
1847 void
1848 oce_free_lro(POCE_SOFTC sc)
1849 {
1850 	struct lro_ctrl *lro = NULL;
1851 	int i = 0;
1852 
1853 	for (i = 0; i < sc->nrqs; i++) {
1854 		lro = &sc->rq[i]->lro;
1855 		if (lro)
1856 			tcp_lro_free(lro);
1857 	}
1858 }
1859 #endif
1860 
1861 int
1862 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1863 {
1864 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1865 	int i, rc;
1866 	struct oce_packet_desc *pd;
1867 	bus_dma_segment_t segs[6];
1868 	int nsegs, added = 0;
1869 	struct oce_nic_rqe *rqe;
1870 	pd_rxulp_db_t rxdb_reg;
1871 	uint32_t val = 0;
1872 	uint32_t oce_max_rq_posts = 64;
1873 
1874 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1875 	for (i = 0; i < count; i++) {
1876 		pd = &rq->pckts[rq->ring->pidx];
1877 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1878 		if (pd->mbuf == NULL) {
1879 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1880 			break;
1881 		}
1882 		pd->mbuf->m_nextpkt = NULL;
1883 
1884 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1885 
1886 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1887 					     pd->map,
1888 					     pd->mbuf,
1889 					     segs, &nsegs, BUS_DMA_NOWAIT);
1890 		if (rc) {
1891 			m_free(pd->mbuf);
1892 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1893 			break;
1894 		}
1895 
1896 		if (nsegs != 1) {
1897 			i--;
1898 			continue;
1899 		}
1900 
1901 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1902 
1903 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1904 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1905 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1906 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1907 		RING_PUT(rq->ring, 1);
1908 		added++;
1909 		rq->pending++;
1910 	}
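	/* Ring the RQ doorbell in chunks: at most oce_max_rq_posts buffers
	 * may be reported per doorbell write, so post full chunks first and
	 * any remainder last. */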
1911 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1912 	if (added != 0) {
1913 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1914 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1915 			rxdb_reg.bits.qid = rq->rq_id;
1916 			if (rq->islro) {
1917 				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
1918 				val |= oce_max_rq_posts << 16;
1919 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1920 			} else {
1921 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1922 			}
1923 			added -= oce_max_rq_posts;
1924 		}
1925 		if (added > 0) {
1926 			rxdb_reg.bits.qid = rq->rq_id;
1927 			rxdb_reg.bits.num_posted = added;
1928 			if (rq->islro) {
1929 				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
1930 				val |= added << 16;
1931 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1932 			} else {
1933 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1934 			}
1935 		}
1936 	}
1937 
1938 	return 0;
1939 }
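
/*
 * Doorbell layout used above, for reference: for a hardware-LRO RQ the
 * 32-bit doorbell packs the queue id into its low bits and the number of
 * newly posted buffers at bit 16, and is rebuilt from scratch for every
 * write.  A worked example, assuming rq_id == 5 and a full batch of 64:
 *
 *	val  = 5 & DB_LRO_RQ_ID_MASK;		now 0x00000005
 *	val |= 64 << 16;			now 0x00400005
 *	OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
 *
 * Non-LRO RQs post through the pd_rxulp_db_t bitfield written to
 * PD_RXULP_DB instead.
 */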
1940 
1941 static void
1942 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1943 {
1944 	if (num_cqes) {
1945 		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1946 		if (!sc->enable_hwlro) {
1947 			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1948 				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1949 		} else {
1950 			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
1951 				oce_alloc_rx_bufs(rq, 64);
1952 		}
1953 	}
1954 
1955 	return;
1956 }
1957 
1958 uint16_t
1959 oce_rq_handler_lro(void *arg)
1960 {
1961 	struct oce_rq *rq = (struct oce_rq *)arg;
1962 	struct oce_cq *cq = rq->cq;
1963 	POCE_SOFTC sc = rq->parent;
1964 	struct nic_hwlro_singleton_cqe *cqe;
1965 	struct nic_hwlro_cqe_part2 *cqe2;
1966 	int num_cqes = 0;
1967 
1968 	LOCK(&rq->rx_lock);
1969 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1970 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
1971 	while (cqe->valid) {
1972 		if (cqe->cqe_type == 0) { /* singleton cqe */
1973 			/* we should not get a singleton cqe after cqe1 on the same rq */
1974 			if (rq->cqe_firstpart != NULL) {
1975 				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
1976 				goto exit_rq_handler_lro;
1977 			}
1978 			if (cqe->error != 0) {
1979 				rq->rx_stats.rxcp_err++;
1980 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1981 			}
1982 			oce_rx_lro(rq, cqe, NULL);
1983 			rq->rx_stats.rx_compl++;
1984 			cqe->valid = 0;
1985 			RING_GET(cq->ring, 1);
1986 			num_cqes++;
1987 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1988 				break;
1989 		} else if (cqe->cqe_type == 0x1) { /* first part */
1990 			/* we should not get cqe1 after cqe1 on the same rq */
1991 			if (rq->cqe_firstpart != NULL) {
1992 				device_printf(sc->dev, "Got cqe1 after cqe1\n");
1993 				goto exit_rq_handler_lro;
1994 			}
1995 			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
1996 			RING_GET(cq->ring, 1);
1997 		} else if (cqe->cqe_type == 0x2) { /* second part */
1998 			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
1999 			if (cqe2->error != 0) {
2000 				rq->rx_stats.rxcp_err++;
2001 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2002 			}
2003 			/* we should not get cqe2 without cqe1 */
2004 			if (rq->cqe_firstpart == NULL) {
2005 				device_printf(sc->dev, "Got cqe2 without cqe1\n");
2006 				goto exit_rq_handler_lro;
2007 			}
2008 			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2009 
2010 			rq->rx_stats.rx_compl++;
2011 			rq->cqe_firstpart->valid = 0;
2012 			cqe2->valid = 0;
2013 			rq->cqe_firstpart = NULL;
2014 
2015 			RING_GET(cq->ring, 1);
2016 			num_cqes += 2;
2017 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2018 				break;
2019 		}
2020 
2021 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2022 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2023 	}
2024 	oce_check_rx_bufs(sc, num_cqes, rq);
2025 exit_rq_handler_lro:
2026 	UNLOCK(&rq->rx_lock);
2027 	return 0;
2028 }
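
/*
 * The hardware-LRO completion ring interleaves three CQE flavors, so the
 * loop above is a small state machine keyed on rq->cqe_firstpart.  In
 * outline (illustrative pseudocode, not driver API):
 *
 *	type 0x0: stand-alone completion -> deliver(cqe, NULL)
 *	type 0x1: first half of a pair   -> rq->cqe_firstpart = cqe
 *	type 0x2: second half of a pair  -> deliver(rq->cqe_firstpart, cqe2),
 *	                                    then rq->cqe_firstpart = NULL
 *
 * Any other ordering (two first halves in a row, or a second half with no
 * first) is reported and aborts the handler.
 */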
2029 
2030 /* Handle the Completion Queue for receive */
2031 uint16_t
2032 oce_rq_handler(void *arg)
2033 {
2034 	struct epoch_tracker et;
2035 	struct oce_rq *rq = (struct oce_rq *)arg;
2036 	struct oce_cq *cq = rq->cq;
2037 	POCE_SOFTC sc = rq->parent;
2038 	struct oce_nic_rx_cqe *cqe;
2039 	int num_cqes = 0;
2040 
2041 	NET_EPOCH_ENTER(et);
2042 	if(rq->islro) {
2043 		oce_rq_handler_lro(arg);
2044 		NET_EPOCH_EXIT(et);
2045 		return 0;
2046 	}
2047 	LOCK(&rq->rx_lock);
2048 	bus_dmamap_sync(cq->ring->dma.tag,
2049 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2050 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2051 	while (cqe->u0.dw[2]) {
2052 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2053 
2054 		if (cqe->u0.s.error == 0) {
2055 			oce_rx(rq, cqe);
2056 		} else {
2057 			rq->rx_stats.rxcp_err++;
2058 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2059 			/* Post L3/L4 errors to stack.*/
2060 			oce_rx(rq, cqe);
2061 		}
2062 		rq->rx_stats.rx_compl++;
2063 		cqe->u0.dw[2] = 0;
2064 
2065 #if defined(INET6) || defined(INET)
2066 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2067 			oce_rx_flush_lro(rq);
2068 		}
2069 #endif
2070 
2071 		RING_GET(cq->ring, 1);
2072 		bus_dmamap_sync(cq->ring->dma.tag,
2073 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2074 		cqe =
2075 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2076 		num_cqes++;
2077 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2078 			break;
2079 	}
2080 
2081 #if defined(INET6) || defined(INET)
2082 	if (IF_LRO_ENABLED(sc))
2083 		oce_rx_flush_lro(rq);
2084 #endif
2085 
2086 	oce_check_rx_bufs(sc, num_cqes, rq);
2087 	UNLOCK(&rq->rx_lock);
2088 	NET_EPOCH_EXIT(et);
2089 	return 0;
2090 
2091 }
2092 
2093 /*****************************************************************************
2094  *			Helper functions in this file			     *
2095  *****************************************************************************/
2096 
2097 static void
2098 oce_attach_ifp(POCE_SOFTC sc)
2099 {
2100 
2101 	sc->ifp = if_alloc(IFT_ETHER);
2102 
2103 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2104 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2105 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2106 
2107 	if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST);
2108 	if_setioctlfn(sc->ifp, oce_ioctl);
2109 	if_setstartfn(sc->ifp, oce_start);
2110 	if_setinitfn(sc->ifp, oce_init);
2111 	if_setmtu(sc->ifp, ETHERMTU);
2112 	if_setsoftc(sc->ifp, sc);
2113 	if_settransmitfn(sc->ifp, oce_multiq_start);
2114 	if_setqflushfn(sc->ifp, oce_multiq_flush);
2115 
2116 	if_initname(sc->ifp,
2117 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2118 
2119 	if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
2120 	if_setsendqready(sc->ifp);
2121 
2122 	if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
2123 	if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
2124 	if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
2125 
2126 	if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
2127 	if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
2128 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);
2129 
2130 #if defined(INET6) || defined(INET)
2131 	if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
2132 	if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
2133 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
2134 #endif
2135 
2136 	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
2137 	if_setbaudrate(sc->ifp, IF_Gbps(10));
2138 
2139 	if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2140 	if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
2141 	if_sethwtsomaxsegsize(sc->ifp, 4096);
2142 
2143 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2144 }
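
/*
 * The TSO bounds advertised above work out to concrete limits: with
 * ETHER_HDR_LEN = 14 and ETHER_VLAN_ENCAP_LEN = 4, the stack may hand the
 * driver at most 65536 - (14 + 4) = 65518 bytes per TSO request, split
 * across no more than OCE_MAX_TX_ELEMENTS segments of up to 4096 bytes
 * each.
 */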
2145 
2146 static void
2147 oce_add_vlan(void *arg, if_t ifp, uint16_t vtag)
2148 {
2149 	POCE_SOFTC sc = if_getsoftc(ifp);
2150 
2151 	if (if_getsoftc(ifp) != arg)
2152 		return;
2153 	if ((vtag == 0) || (vtag > 4095))
2154 		return;
2155 
2156 	sc->vlan_tag[vtag] = 1;
2157 	sc->vlans_added++;
2158 	if (sc->vlans_added <= (sc->max_vlans + 1))
2159 		oce_vid_config(sc);
2160 }
2161 
2162 static void
2163 oce_del_vlan(void *arg, if_t ifp, uint16_t vtag)
2164 {
2165 	POCE_SOFTC sc = if_getsoftc(ifp);
2166 
2167 	if (if_getsoftc(ifp) != arg)
2168 		return;
2169 	if ((vtag == 0) || (vtag > 4095))
2170 		return;
2171 
2172 	sc->vlan_tag[vtag] = 0;
2173 	sc->vlans_added--;
2174 	oce_vid_config(sc);
2175 }
2176 
2177 /*
2178  * A max of 64 vlans can be configured in BE. If the user configures
2179  * more, place the card in vlan promiscuous mode.
2180  */
2181 static int
2182 oce_vid_config(POCE_SOFTC sc)
2183 {
2184 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2185 	uint16_t ntags = 0, i;
2186 	int status = 0;
2187 
2188 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2189 			(if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
2190 		for (i = 0; i < MAX_VLANS; i++) {
2191 			if (sc->vlan_tag[i]) {
2192 				vtags[ntags].vtag = i;
2193 				ntags++;
2194 			}
2195 		}
2196 		if (ntags)
2197 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2198 						vtags, ntags, 1, 0);
2199 	} else
2200 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2201 					 	NULL, 0, 1, 1);
2202 	return status;
2203 }
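
/*
 * Example of the filter walk above (command line shown only for
 * illustration): after "ifconfig vlan100 create vlan 100 vlandev oce0"
 * and the equivalent for VID 200, oce_add_vlan() has set
 * sc->vlan_tag[100] = sc->vlan_tag[200] = 1, so the loop builds
 * vtags = { {.vtag = 100}, {.vtag = 200} } and programs ntags = 2
 * hardware filters.  Once more VLANs are added than MAX_VLANFILTER_SIZE,
 * or IFCAP_VLAN_HWFILTER is disabled, the second oce_config_vlan() call
 * (NULL list) places the port in vlan-promiscuous mode instead.
 */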
2204 
2205 static void
2206 oce_mac_addr_set(POCE_SOFTC sc)
2207 {
2208 	uint32_t old_pmac_id = sc->pmac_id;
2209 	int status = 0;
2210 
2211 	status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2212 			 sc->macaddr.size_of_struct);
2213 	if (!status)
2214 		return;
2215 
2216 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
2217 					sc->if_id, &sc->pmac_id);
2218 	if (!status) {
2219 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2220 		bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2221 				 sc->macaddr.size_of_struct);
2222 	}
2223 	if (status)
2224 		device_printf(sc->dev, "Failed to update MAC address\n");
2225 
2226 }
2227 
2228 static int
2229 oce_handle_passthrough(if_t ifp, caddr_t data)
2230 {
2231 	POCE_SOFTC sc = if_getsoftc(ifp);
2232 	struct ifreq *ifr = (struct ifreq *)data;
2233 	int rc = ENXIO;
2234 	char cookie[32] = {0};
2235 	void *priv_data = ifr_data_get_ptr(ifr);
2236 	void *ioctl_ptr;
2237 	uint32_t req_size;
2238 	struct mbx_hdr req;
2239 	OCE_DMA_MEM dma_mem;
2240 
2241 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2242 		return EFAULT;
2243 
2244 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2245 		return EINVAL;
2246 
2247 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2248 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2249 		return EFAULT;
2250 
2251 	req_size = le32toh(req.u0.req.request_length);
2252 	if (req_size > 65536)
2253 		return EINVAL;
2254 
2255 	req_size += sizeof(struct mbx_hdr);
2256 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2257 	if (rc)
2258 		return ENOMEM;
2259 
2260 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2261 		rc = EFAULT;
2262 		goto dma_free;
2263 	}
2264 
2265 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2266 	if (rc) {
2267 		rc = EIO;
2268 		goto dma_free;
2269 	}
2270 
2271 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) {
2272 		rc =  EFAULT;
2273 		goto dma_free;
2274 	}
2275 
2276 	/*
2277 	 * The firmware fills in all the attributes for this ioctl except
2278 	 * the driver version, so fill that in here.
2279 	 */
2280 	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2281 		struct mbx_common_get_cntl_attr *fw_cmd =
2282 		    (struct mbx_common_get_cntl_attr *)ioctl_ptr;
2283 		_Static_assert(sizeof(COMPONENT_REVISION) <=
2284 		     sizeof(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str),
2285 		     "driver version string too long");
2286 
2287 		rc = copyout(COMPONENT_REVISION,
2288 		    fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2289 		    sizeof(COMPONENT_REVISION));
2290 	}
2291 
2292 dma_free:
2293 	oce_dma_free(sc, &dma_mem);
2294 	return rc;
2295 
2296 }
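
/*
 * Userland buffer layout consumed by the passthrough ioctl above, for
 * reference (the cookie is validated first; everything past it is copied
 * into a DMA-able buffer, posted to the mailbox, then copied back out so
 * the caller sees the firmware response in place):
 *
 *	+--------------+----------------+--------------------------+
 *	| IOCTL_COOKIE | struct mbx_hdr | request/response payload |
 *	+--------------+----------------+--------------------------+
 *	               ^ ioctl_ptr
 *
 * request_length counts only the payload, which is why sizeof(struct
 * mbx_hdr) is added before the DMA buffer is sized.
 */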
2297 
2298 static void
2299 oce_eqd_set_periodic(POCE_SOFTC sc)
2300 {
2301 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2302 	struct oce_aic_obj *aic;
2303 	struct oce_eq *eqo;
2304 	uint64_t now = 0, delta;
2305 	int eqd, i, num = 0;
2306 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2307 	struct oce_wq *wq;
2308 	struct oce_rq *rq;
2309 
2310 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2311 
2312 	for (i = 0 ; i < sc->neqs; i++) {
2313 		eqo = sc->eq[i];
2314 		aic = &sc->aic_obj[i];
2315 		/* A static eq delay set from userspace disables adaptation */
2316 		if (!aic->enable) {
2317 			if (aic->ticks)
2318 				aic->ticks = 0;
2319 			eqd = aic->et_eqd;
2320 			goto modify_eqd;
2321 		}
2322 
2323 		if (i == 0) {
2324 			rq = sc->rq[0];
2325 			rxpkts = rq->rx_stats.rx_pkts;
2326 		} else
2327 			rxpkts = 0;
2328 		if (i + 1 < sc->nrqs) {
2329 			rq = sc->rq[i + 1];
2330 			rxpkts += rq->rx_stats.rx_pkts;
2331 		}
2332 		if (i < sc->nwqs) {
2333 			wq = sc->wq[i];
2334 			tx_reqs = wq->tx_stats.tx_reqs;
2335 		} else
2336 			tx_reqs = 0;
2337 		now = ticks;
2338 
2339 		if (!aic->ticks || now <= aic->ticks ||
2340 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2341 			aic->prev_rxpkts = rxpkts;
2342 			aic->prev_txreqs = tx_reqs;
2343 			aic->ticks = now;
2344 			continue;
2345 		}
2346 
2347 		delta = ticks_to_msecs(now - aic->ticks);
2348 
2349 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2350 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2351 		eqd = (pps / 15000) << 2;
2352 		if (eqd < 8)
2353 			eqd = 0;
2354 
2355 		/* Make sure that the eq delay is in the known range */
2356 		eqd = min(eqd, aic->max_eqd);
2357 		eqd = max(eqd, aic->min_eqd);
2358 
2359 		aic->prev_rxpkts = rxpkts;
2360 		aic->prev_txreqs = tx_reqs;
2361 		aic->ticks = now;
2362 
2363 modify_eqd:
2364 		if (eqd != aic->cur_eqd) {
2365 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2366 			set_eqd[num].eq_id = eqo->eq_id;
2367 			aic->cur_eqd = eqd;
2368 			num++;
2369 		}
2370 	}
2371 
2372 	/* Modify all EQs whose delay changed, in batches of at most 8 */
2373 	for (i = 0; i < num; i += 8) {
2374 		if ((num - i) >= 8)
2375 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
2376 		else
2377 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2378 	}
2379 
2380 }
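
/*
 * Worked example of the adaptive delay above: at a combined RX+TX rate of
 * 120000 packets/s, eqd = (120000 / 15000) << 2 = 32, clamped to
 * [aic->min_eqd, aic->max_eqd], and the value handed to firmware is
 * delay_multiplier = (32 * 65) / 100 = 20.  Below roughly 30000 packets/s
 * the raw eqd falls under 8 and is forced to 0, i.e. interrupt moderation
 * is switched off for lightly loaded queues.
 */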
2381 
2382 static void oce_detect_hw_error(POCE_SOFTC sc)
2383 {
2384 
2385 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2386 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2387 	uint32_t i;
2388 
2389 	if (sc->hw_error)
2390 		return;
2391 
2392 	if (IS_XE201(sc)) {
2393 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2394 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2395 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2396 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2397 		}
2398 	} else {
2399 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2400 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2401 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2402 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2403 
2404 		ue_low = (ue_low & ~ue_low_mask);
2405 		ue_high = (ue_high & ~ue_high_mask);
2406 	}
2407 
2408 	/* On certain platforms BE hardware can indicate spurious UEs.
2409 	 * In case of a real UE the hardware stops working on its own, so
2410 	 * hw_error is deliberately not set on UE detection.
2411 	 */
2412 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2413 		sc->hw_error = TRUE;
2414 		device_printf(sc->dev, "Error detected in the card\n");
2415 	}
2416 
2417 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2418 		device_printf(sc->dev,
2419 				"ERR: sliport status 0x%x\n", sliport_status);
2420 		device_printf(sc->dev,
2421 				"ERR: sliport error1 0x%x\n", sliport_err1);
2422 		device_printf(sc->dev,
2423 				"ERR: sliport error2 0x%x\n", sliport_err2);
2424 	}
2425 
2426 	if (ue_low) {
2427 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2428 			if (ue_low & 1)
2429 				device_printf(sc->dev, "UE: %s bit set\n",
2430 							ue_status_low_desc[i]);
2431 		}
2432 	}
2433 
2434 	if (ue_high) {
2435 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2436 			if (ue_high & 1)
2437 				device_printf(sc->dev, "UE: %s bit set\n",
2438 							ue_status_hi_desc[i]);
2439 		}
2440 	}
2441 
2442 }
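
/*
 * Example of the UE decoding above: if PCICFG_UE_STATUS_LOW reads 0x5
 * while its mask register reads 0x4, the masked bit is discarded, leaving
 * ue_low == 0x1, and the loop reports only bit 0 ("CEV" in
 * ue_status_low_desc) as a genuine unrecoverable error.
 */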
2443 
2444 static void
2445 oce_local_timer(void *arg)
2446 {
2447 	POCE_SOFTC sc = arg;
2448 	int i = 0;
2449 
2450 	oce_detect_hw_error(sc);
2451 	oce_refresh_nic_stats(sc);
2452 	oce_refresh_queue_stats(sc);
2453 	oce_mac_addr_set(sc);
2454 
2455 	/* TX watchdog */
2456 	for (i = 0; i < sc->nwqs; i++)
2457 		oce_tx_restart(sc, sc->wq[i]);
2458 
2459 	/* calculate and set the eq delay for optimal interrupt rate */
2460 	if (IS_BE(sc) || IS_SH(sc))
2461 		oce_eqd_set_periodic(sc);
2462 
2463 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2464 }
2465 
2466 static void
2467 oce_tx_compl_clean(POCE_SOFTC sc)
2468 {
2469 	struct oce_wq *wq;
2470 	int i = 0, timeo = 0, num_wqes = 0;
2471 	int pending_txqs = sc->nwqs;
2472 
2473 	/* Stop polling for completions once the hardware has been silent
2474 	 * for 10ms, on hw_error, or when no outstanding completions remain.
2475 	 */
2476 	do {
2477 		pending_txqs = sc->nwqs;
2478 
2479 		for_all_wq_queues(sc, wq, i) {
2480 			num_wqes = oce_wq_handler(wq);
2481 
2482 			if (num_wqes)
2483 				timeo = 0;
2484 
2485 			if (!wq->ring->num_used)
2486 				pending_txqs--;
2487 		}
2488 
2489 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2490 			break;
2491 
2492 		DELAY(1000);
2493 	} while (TRUE);
2494 
2495 	for_all_wq_queues(sc, wq, i) {
2496 		while (wq->ring->num_used) {
2497 			LOCK(&wq->tx_compl_lock);
2498 			oce_process_tx_completion(wq);
2499 			UNLOCK(&wq->tx_compl_lock);
2500 		}
2501 	}
2502 
2503 }
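
/*
 * Timing of the loop above, for reference: each pass that finds no
 * completions sleeps DELAY(1000), i.e. 1ms, and the loop gives up after
 * ++timeo > 10 such passes, which is the "silent for 10ms" bound in the
 * comment.
 */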
2504 
2505 /* NOTE : This should only be called holding
2506  *        DEVICE_LOCK.
2507  */
2508 static void
2509 oce_if_deactivate(POCE_SOFTC sc)
2510 {
2511 	int i;
2512 	struct oce_rq *rq;
2513 	struct oce_wq *wq;
2514 	struct oce_eq *eq;
2515 
2516 	if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2517 
2518 	oce_tx_compl_clean(sc);
2519 
2520 	/* Stop intrs and finish any bottom halves pending */
2521 	oce_hw_intr_disable(sc);
2522 
2523 	/* Since taskqueue_drain takes the Giant lock, we should not hold
2524 	   any other lock. So unlock the device lock and reacquire it after
2525 	   completing taskqueue_drain.
2526 	*/
2527 	UNLOCK(&sc->dev_lock);
2528 	for (i = 0; i < sc->intr_count; i++) {
2529 		if (sc->intrs[i].tq != NULL) {
2530 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2531 		}
2532 	}
2533 	LOCK(&sc->dev_lock);
2534 
2535 	/* Delete RX queue in card with flush param */
2536 	oce_stop_rx(sc);
2537 
2538 	/* Invalidate any pending cq and eq entries*/
2539 	for_all_evnt_queues(sc, eq, i)
2540 		oce_drain_eq(eq);
2541 	for_all_rq_queues(sc, rq, i)
2542 		oce_drain_rq_cq(rq);
2543 	for_all_wq_queues(sc, wq, i)
2544 		oce_drain_wq_cq(wq);
2545 
2546 	/* We still need to receive MCC async events, so re-enable
2547 	   interrupts and arm the first EQ.
2548 	*/
2549 	oce_hw_intr_enable(sc);
2550 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2551 
2552 	DELAY(10);
2553 }
2554 
2555 static void
2556 oce_if_activate(POCE_SOFTC sc)
2557 {
2558 	struct oce_eq *eq;
2559 	struct oce_rq *rq;
2560 	struct oce_wq *wq;
2561 	int i, rc = 0;
2562 
2563 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING , 0);
2564 
2565 	oce_hw_intr_disable(sc);
2566 
2567 	oce_start_rx(sc);
2568 
2569 	for_all_rq_queues(sc, rq, i) {
2570 		rc = oce_start_rq(rq);
2571 		if (rc)
2572 			device_printf(sc->dev, "Unable to start RX\n");
2573 	}
2574 
2575 	for_all_wq_queues(sc, wq, i) {
2576 		rc = oce_start_wq(wq);
2577 		if (rc)
2578 			device_printf(sc->dev, "Unable to start TX\n");
2579 	}
2580 
2581 	for_all_evnt_queues(sc, eq, i)
2582 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2583 
2584 	oce_hw_intr_enable(sc);
2585 
2586 }
2587 
2588 static void
2589 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2590 {
2591 	/* Update Link status */
2592 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2593 	     ASYNC_EVENT_LINK_UP) {
2594 		sc->link_status = ASYNC_EVENT_LINK_UP;
2595 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2596 	} else {
2597 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2598 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2599 	}
2600 }
2601 
2602 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2603 					 struct oce_async_evt_grp5_os2bmc *evt)
2604 {
2605 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2606 	if (evt->u.s.mgmt_enable)
2607 		sc->flags |= OCE_FLAGS_OS2BMC;
2608 	else
2609 		return;
2610 
2611 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2612 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2613 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2614 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2615 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2616 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2617 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2618 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2619 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2620 }
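
/*
 * Resulting bmc_filt_mask layout, for reference: bit 0 ARP, bit 1 DHCP
 * client, bit 2 DHCP server, bit 3 NetBIOS, bit 4 broadcast, bit 5 IPv6
 * neighbor, bit 6 IPv6 RA, bit 7 IPv6 RAS, bit 8 multicast.  For example,
 * an event enabling only the ARP and broadcast filters yields
 * bmc_filt_mask == 0x11.
 */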
2621 
2622 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2623 {
2624 	struct oce_async_event_grp5_pvid_state *gcqe;
2625 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2626 
2627 	switch (cqe->u0.s.async_type) {
2628 	case ASYNC_EVENT_PVID_STATE:
2629 		/* GRP5 PVID */
2630 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2631 		if (gcqe->enabled)
2632 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2633 		else
2634 			sc->pvid = 0;
2635 		break;
2636 	case ASYNC_EVENT_OS2BMC:
2637 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2638 		oce_async_grp5_osbmc_process(sc, bmccqe);
2639 		break;
2640 	default:
2641 		break;
2642 	}
2643 }
2644 
2645 /* Handle the Completion Queue for the Mailbox/Async notifications */
2646 uint16_t
2647 oce_mq_handler(void *arg)
2648 {
2649 	struct oce_mq *mq = (struct oce_mq *)arg;
2650 	POCE_SOFTC sc = mq->parent;
2651 	struct oce_cq *cq = mq->cq;
2652 	int num_cqes = 0, evt_type = 0, optype = 0;
2653 	struct oce_mq_cqe *cqe;
2654 	struct oce_async_cqe_link_state *acqe;
2655 	struct oce_async_event_qnq *dbgcqe;
2656 
2657 	bus_dmamap_sync(cq->ring->dma.tag,
2658 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2659 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2660 
2661 	while (cqe->u0.dw[3]) {
2662 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2663 		if (cqe->u0.s.async_event) {
2664 			evt_type = cqe->u0.s.event_type;
2665 			optype = cqe->u0.s.async_type;
2666 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2667 				/* Link status evt */
2668 				acqe = (struct oce_async_cqe_link_state *)cqe;
2669 				process_link_state(sc, acqe);
2670 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2671 				oce_process_grp5_events(sc, cqe);
2672 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2673 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2674 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2675 				if (dbgcqe->valid)
2676 					sc->qnqid = dbgcqe->vlan_tag;
2677 				sc->qnq_debug_event = TRUE;
2678 			}
2679 		}
2680 		cqe->u0.dw[3] = 0;
2681 		RING_GET(cq->ring, 1);
2682 		bus_dmamap_sync(cq->ring->dma.tag,
2683 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2684 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2685 		num_cqes++;
2686 	}
2687 
2688 	if (num_cqes)
2689 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2690 
2691 	return 0;
2692 }
2693 
2694 static void
2695 setup_max_queues_want(POCE_SOFTC sc)
2696 {
2697 	/* Check if it is a FLEX machine; if so, don't use RSS */
2698 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2699 	    (sc->function_mode & FNM_UMC_MODE)    ||
2700 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2701 	    (!is_rss_enabled(sc))		  ||
2702 	    IS_BE2(sc)) {
2703 		sc->nrqs = 1;
2704 		sc->nwqs = 1;
2705 	} else {
2706 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2707 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2708 	}
2709 
2710 	if (IS_BE2(sc) && is_rss_enabled(sc))
2711 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2712 }
2713 
2714 static void
2715 update_queues_got(POCE_SOFTC sc)
2716 {
2717 	if (is_rss_enabled(sc)) {
2718 		sc->nrqs = sc->intr_count + 1;
2719 		sc->nwqs = sc->intr_count;
2720 	} else {
2721 		sc->nrqs = 1;
2722 		sc->nwqs = 1;
2723 	}
2724 
2725 	if (IS_BE2(sc))
2726 		sc->nwqs = 1;
2727 }
2728 
2729 static int
2730 oce_check_ipv6_ext_hdr(struct mbuf *m)
2731 {
2732 	struct ether_header *eh = mtod(m, struct ether_header *);
2733 	caddr_t m_datatemp = m->m_data;
2734 
2735 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2736 		m->m_data += sizeof(struct ether_header);
2737 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2738 
2739 		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
2740 		    (ip6->ip6_nxt != IPPROTO_UDP)) {
2741 			struct ip6_ext *ip6e = NULL;
2742 			m->m_data += sizeof(struct ip6_hdr);
2743 
2744 			ip6e = mtod(m, struct ip6_ext *);
2745 			if (ip6e->ip6e_len == 0xff) {
2746 				m->m_data = m_datatemp;
2747 				return TRUE;
2748 			}
2749 		}
2750 		m->m_data = m_datatemp;
2751 	}
2752 	return FALSE;
2753 }
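
/*
 * Offsets walked above, for reference: m_data is advanced past the
 * 14-byte Ethernet header and then past the 40-byte fixed IPv6 header, so
 * ip6e points at the first extension header.  An ip6e_len of 0xff is
 * treated as the signature of an extension chain that can stall the ASIC
 * (see oce_tx_asic_stall_verify() below); m_data is restored before
 * returning either way.
 */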
2754 
2755 static int
2756 is_be3_a1(POCE_SOFTC sc)
2757 {
2758 	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2759 		return TRUE;
2760 	}
2761 	return FALSE;
2762 }
2763 
2764 static struct mbuf *
2765 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2766 {
2767 	uint16_t vlan_tag = 0;
2768 
2769 	if (!M_WRITABLE(m))
2770 		return NULL;
2771 
2772 	/* Embed the vlan tag in the packet if it is not already part of it */
2773 	if (m->m_flags & M_VLANTAG) {
2774 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2775 		m->m_flags &= ~M_VLANTAG;
2776 	}
2777 
2778 	/* if UMC, ignore the vlan tag insertion and instead insert the pvid */
2779 	if (sc->pvid) {
2780 		if (!vlan_tag)
2781 			vlan_tag = sc->pvid;
2782 		if (complete)
2783 			*complete = FALSE;
2784 	}
2785 
2786 	if (vlan_tag &&
2787 	    (m = ether_vlanencap(m, vlan_tag)) == NULL)
2788 		return NULL;
2789 
2790 	if (sc->qnqid) {
2791 		m = ether_vlanencap(m, sc->qnqid);
2792 
2793 		if (complete)
2794 			*complete = FALSE;
2795 	}
2796 	return m;
2797 }
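
/*
 * Example of the double-tag (QnQ) case above: with sc->pvid = 100 and
 * sc->qnqid = 4000, an untagged writable mbuf leaves this function
 * encapsulated twice (inner tag 100 from the PVID, outer tag 4000 from
 * the QnQ id), and *complete is cleared so the caller knows not to also
 * request tag insertion from the hardware.
 */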
2798 
2799 static int
2800 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2801 {
2802 	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
2803 	    oce_check_ipv6_ext_hdr(m)) {
2804 		return TRUE;
2805 	}
2806 	return FALSE;
2807 }
2808 
2809 static void
2810 oce_get_config(POCE_SOFTC sc)
2811 {
2812 	int rc = 0;
2813 	uint32_t max_rss = 0;
2814 
2815 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2816 		max_rss = OCE_LEGACY_MODE_RSS;
2817 	else
2818 		max_rss = OCE_MAX_RSS;
2819 
2820 	if (!IS_BE(sc)) {
2821 		rc = oce_get_profile_config(sc, max_rss);
2822 		if (rc) {
2823 			sc->nwqs = OCE_MAX_WQ;
2824 			sc->nrssqs = max_rss;
2825 			sc->nrqs = sc->nrssqs + 1;
2826 		}
2827 	} else {
2828 		/* For BE3 don't rely on fw for determining the resources */
2829 		sc->nrssqs = max_rss;
2830 		sc->nrqs = sc->nrssqs + 1;
2831 		sc->nwqs = OCE_MAX_WQ;
2832 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2833 	}
2834 }
2835 
2836 static void
2837 oce_rdma_close(void)
2838 {
2839 	if (oce_rdma_if != NULL) {
2840 		oce_rdma_if = NULL;
2841 	}
2842 }
2843 
2844 static void
2845 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2846 {
2847 	memcpy(macaddr, sc->macaddr.mac_addr, 6);
2848 }
2849 
2850 int
2851 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2852 {
2853 	POCE_SOFTC sc;
2854 	struct oce_dev_info di;
2855 	int i;
2856 
2857 	if ((rdma_info == NULL) || (rdma_if == NULL)) {
2858 		return -EINVAL;
2859 	}
2860 
2861 	if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2862 	    (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2863 		return -ENXIO;
2864 	}
2865 
2866 	rdma_info->close = oce_rdma_close;
2867 	rdma_info->mbox_post = oce_mbox_post;
2868 	rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2869 	rdma_info->get_mac_addr = oce_get_mac_addr;
2870 
2871 	oce_rdma_if = rdma_if;
2872 
2873 	sc = softc_head;
2874 	while (sc != NULL) {
2875 		if (oce_rdma_if->announce != NULL) {
2876 			memset(&di, 0, sizeof(di));
2877 			di.dev = sc->dev;
2878 			di.softc = sc;
2879 			di.ifp = sc->ifp;
2880 			di.db_bhandle = sc->db_bhandle;
2881 			di.db_btag = sc->db_btag;
2882 			di.db_page_size = 4096;
2883 			if (sc->flags & OCE_FLAGS_USING_MSIX) {
2884 				di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2885 			} else if (sc->flags & OCE_FLAGS_USING_MSI) {
2886 				di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2887 			} else {
2888 				di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2889 			}
2890 			di.dev_family = OCE_GEN2_FAMILY; /* fixme: must detect skyhawk */
2891 			if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2892 				di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2893 				di.msix.start_vector = sc->intr_count;
2894 				for (i = 0; i < di.msix.num_vectors; i++) {
2895 					di.msix.vector_list[i] = sc->intrs[i].vector;
2896 				}
2897 			}
2898 			memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2899 			di.vendor_id = pci_get_vendor(sc->dev);
2900 			di.dev_id = pci_get_device(sc->dev);
2901 
2902 			if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2903 				di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2904 			}
2905 
2906 			rdma_if->announce(&di);
2907 		}
2908 		/* advance unconditionally so a NULL announce callback cannot spin */
2909 		sc = sc->next;
2910 	}
2911 
2912   return 0;
2913 }
2914 
2915 static void
2916 oce_read_env_variables( POCE_SOFTC sc )
2917 {
2918 	char *value = NULL;
2919 	int rc = 0;
2920 
2921 	/* read if the user wants to enable hwlro or swlro */
2922 	/* value = getenv("oce_enable_hwlro"); */
2923 	if (value && IS_SH(sc)) {
2924 		sc->enable_hwlro = strtol(value, NULL, 10);
2925 		if (sc->enable_hwlro) {
2926 			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
2927 			if (rc) {
2928 				device_printf(sc->dev, "no hardware lro support\n");
2929 				device_printf(sc->dev, "software lro enabled\n");
2930 				sc->enable_hwlro = 0;
2931 			} else {
2932 				device_printf(sc->dev, "hardware lro enabled\n");
2933 				oce_max_rsp_handled = 32;
2934 			}
2935 		} else {
2936 			device_printf(sc->dev, "software lro enabled\n");
2937 		}
2938 	} else {
2939 		sc->enable_hwlro = 0;
2940 	}
2941 
2942 	/* read the requested receive mbuf size */
2943 	/* value = getenv("oce_rq_buf_size"); */
2944 	if (value && IS_SH(sc)) {
2945 		oce_rq_buf_size = strtol(value, NULL, 10);
2946 		switch (oce_rq_buf_size) {
2947 		case 2048:
2948 		case 4096:
2949 		case 9216:
2950 		case 16384:
2951 			break;
2952 
2953 		default:
2954 			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
2955 			oce_rq_buf_size = 2048;
2956 		}
2957 	}
2958 
2959 	return;
2960 }
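
/*
 * Note that both environment lookups above are currently compiled out, so
 * the defaults always apply.  If they were re-enabled via the kernel
 * environment, the values would come from loader tunables along these
 * lines (illustrative only):
 *
 *	# /boot/loader.conf
 *	oce_enable_hwlro="1"
 *	oce_rq_buf_size="9216"
 */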
2961