xref: /freebsd/sys/dev/oce/oce_if.c (revision 0bf48626aaa33768078f5872b922b1487b3a9296)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
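/* True when the network stack has requested TSO (large-send offload) for this mbuf. */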
49 #define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
122 
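/*
 * Normalized RX completion info: filled from either the regular NIC RX
 * CQE (oce_rx) or the hardware-LRO CQE variants (oce_rx_lro), so that
 * oce_rx_mbuf_chain() can build the mbuf chain without caring which
 * CQE format produced it.
 */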
123 struct oce_common_cqe_info{
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 
136 /* Driver entry points prototypes */
137 static int  oce_probe(device_t dev);
138 static int  oce_attach(device_t dev);
139 static int  oce_detach(device_t dev);
140 static int  oce_shutdown(device_t dev);
141 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
142 static void oce_init(void *xsc);
143 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
144 static void oce_multiq_flush(struct ifnet *ifp);
145 
146 /* Driver interrupt routines prototypes */
147 static void oce_intr(void *arg, int pending);
148 static int  oce_setup_intr(POCE_SOFTC sc);
149 static int  oce_fast_isr(void *arg);
150 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
151 			  void (*isr) (void *arg, int pending));
152 
153 /* Media callbacks prototypes */
154 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
155 static int  oce_media_change(struct ifnet *ifp);
156 
157 /* Transmit routines prototypes */
158 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
159 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
160 static void oce_process_tx_completion(struct oce_wq *wq);
161 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
162 				 struct oce_wq *wq);
163 
164 /* Receive routines prototypes */
165 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
167 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
168 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static uint16_t oce_rq_handler_lro(void *arg);
170 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
172 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
173 
174 /* Helper function prototypes in this file */
175 static int  oce_attach_ifp(POCE_SOFTC sc);
176 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
178 static int  oce_vid_config(POCE_SOFTC sc);
179 static void oce_mac_addr_set(POCE_SOFTC sc);
180 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
181 static void oce_local_timer(void *arg);
182 static void oce_if_deactivate(POCE_SOFTC sc);
183 static void oce_if_activate(POCE_SOFTC sc);
184 static void setup_max_queues_want(POCE_SOFTC sc);
185 static void update_queues_got(POCE_SOFTC sc);
186 static void process_link_state(POCE_SOFTC sc,
187 		 struct oce_async_cqe_link_state *acqe);
188 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
189 static void oce_get_config(POCE_SOFTC sc);
190 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
191 static void oce_read_env_variables(POCE_SOFTC sc);
192 
193 
194 /* IP specific */
195 #if defined(INET6) || defined(INET)
196 static int  oce_init_lro(POCE_SOFTC sc);
197 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
198 #endif
199 
200 static device_method_t oce_dispatch[] = {
201 	DEVMETHOD(device_probe, oce_probe),
202 	DEVMETHOD(device_attach, oce_attach),
203 	DEVMETHOD(device_detach, oce_detach),
204 	DEVMETHOD(device_shutdown, oce_shutdown),
205 
206 	DEVMETHOD_END
207 };
208 
209 static driver_t oce_driver = {
210 	"oce",
211 	oce_dispatch,
212 	sizeof(OCE_SOFTC)
213 };
214 static devclass_t oce_devclass;
215 
216 
217 /* global vars */
218 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
219 
220 /* Module capabilities and parameters */
221 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
222 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
223 uint32_t oce_rq_buf_size = 2048;
224 
225 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
226 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
227 
228 
229 /* Supported devices table */
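/* Each entry packs the PCI vendor id in the high 16 bits and the device id
 * in the low 16 bits; oce_probe() unpacks and matches both halves. */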
230 static uint32_t supportedDevices[] =  {
231 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
232 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
233 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
234 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
235 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
236 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
237 };
238 
239 
240 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
241 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
242     nitems(supportedDevices));
243 MODULE_DEPEND(oce, pci, 1, 1, 1);
244 MODULE_DEPEND(oce, ether, 1, 1, 1);
245 MODULE_VERSION(oce, 1);
246 
247 
248 POCE_SOFTC softc_head = NULL;
249 POCE_SOFTC softc_tail = NULL;
250 
251 struct oce_rdma_if *oce_rdma_if = NULL;
252 
253 /*****************************************************************************
254  *			Driver entry points functions                        *
255  *****************************************************************************/
256 
257 static int
258 oce_probe(device_t dev)
259 {
260 	uint16_t vendor = 0;
261 	uint16_t device = 0;
262 	int i = 0;
263 	char str[256] = {0};
264 	POCE_SOFTC sc;
265 
266 	sc = device_get_softc(dev);
267 	bzero(sc, sizeof(OCE_SOFTC));
268 	sc->dev = dev;
269 
270 	vendor = pci_get_vendor(dev);
271 	device = pci_get_device(dev);
272 
273 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
274 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
275 			if (device == (supportedDevices[i] & 0xffff)) {
276 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
277 					component_revision);
278 				device_set_desc_copy(dev, str);
279 
280 				switch (device) {
281 				case PCI_PRODUCT_BE2:
282 					sc->flags |= OCE_FLAGS_BE2;
283 					break;
284 				case PCI_PRODUCT_BE3:
285 					sc->flags |= OCE_FLAGS_BE3;
286 					break;
287 				case PCI_PRODUCT_XE201:
288 				case PCI_PRODUCT_XE201_VF:
289 					sc->flags |= OCE_FLAGS_XE201;
290 					break;
291 				case PCI_PRODUCT_SH:
292 					sc->flags |= OCE_FLAGS_SH;
293 					break;
294 				default:
295 					return ENXIO;
296 				}
297 				return BUS_PROBE_DEFAULT;
298 			}
299 		}
300 	}
301 
302 	return ENXIO;
303 }
304 
305 
306 static int
307 oce_attach(device_t dev)
308 {
309 	POCE_SOFTC sc;
310 	int rc = 0;
311 
312 	sc = device_get_softc(dev);
313 
314 	rc = oce_hw_pci_alloc(sc);
315 	if (rc)
316 		return rc;
317 
318 	sc->tx_ring_size = OCE_TX_RING_SIZE;
319 	sc->rx_ring_size = OCE_RX_RING_SIZE;
320 	/* receive fragment size should be multiple of 2K */
321 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
322 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
323 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
324 
325 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
326 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
327 
328 	/* initialise the hardware */
329 	rc = oce_hw_init(sc);
330 	if (rc)
331 		goto pci_res_free;
332 
333 	oce_read_env_variables(sc);
334 
335 	oce_get_config(sc);
336 
337 	setup_max_queues_want(sc);
338 
339 	rc = oce_setup_intr(sc);
340 	if (rc)
341 		goto mbox_free;
342 
343 	rc = oce_queue_init_all(sc);
344 	if (rc)
345 		goto intr_free;
346 
347 	rc = oce_attach_ifp(sc);
348 	if (rc)
349 		goto queues_free;
350 
351 #if defined(INET6) || defined(INET)
352 	rc = oce_init_lro(sc);
353 	if (rc)
354 		goto ifp_free;
355 #endif
356 
357 	rc = oce_hw_start(sc);
358 	if (rc)
359 		goto lro_free;
360 
361 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
362 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
363 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
364 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
365 
366 	rc = oce_stats_init(sc);
367 	if (rc)
368 		goto vlan_free;
369 
370 	oce_add_sysctls(sc);
371 
372 	callout_init(&sc->timer, CALLOUT_MPSAFE);
373 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
374 	if (rc)
375 		goto stats_free;
376 
377 	sc->next = NULL;
378 	if (softc_tail != NULL) {
379 	  softc_tail->next = sc;
380 	} else {
381 	  softc_head = sc;
382 	}
383 	softc_tail = sc;
384 
385 	return 0;
386 
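/* Error unwind: release resources in the reverse order of their setup above. */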
387 stats_free:
388 	callout_drain(&sc->timer);
389 	oce_stats_free(sc);
390 vlan_free:
391 	if (sc->vlan_attach)
392 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
393 	if (sc->vlan_detach)
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
395 	oce_hw_intr_disable(sc);
396 lro_free:
397 #if defined(INET6) || defined(INET)
398 	oce_free_lro(sc);
399 ifp_free:
400 #endif
401 	ether_ifdetach(sc->ifp);
402 	if_free(sc->ifp);
403 queues_free:
404 	oce_queue_release_all(sc);
405 intr_free:
406 	oce_intr_free(sc);
407 mbox_free:
408 	oce_dma_free(sc, &sc->bsmbx);
409 pci_res_free:
410 	oce_hw_pci_free(sc);
411 	LOCK_DESTROY(&sc->dev_lock);
412 	LOCK_DESTROY(&sc->bmbx_lock);
413 	return rc;
414 
415 }
416 
417 
418 static int
419 oce_detach(device_t dev)
420 {
421 	POCE_SOFTC sc = device_get_softc(dev);
422 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
423 
424         poce_sc_tmp = softc_head;
425         ppoce_sc_tmp1 = &softc_head;
426         while (poce_sc_tmp != NULL) {
427           if (poce_sc_tmp == sc) {
428             *ppoce_sc_tmp1 = sc->next;
429             if (sc->next == NULL) {
430               softc_tail = poce_sc_tmp2;
431             }
432             break;
433           }
434           poce_sc_tmp2 = poce_sc_tmp;
435           ppoce_sc_tmp1 = &poce_sc_tmp->next;
436           poce_sc_tmp = poce_sc_tmp->next;
437         }
438 
439 	LOCK(&sc->dev_lock);
440 	oce_if_deactivate(sc);
441 	UNLOCK(&sc->dev_lock);
442 
443 	callout_drain(&sc->timer);
444 
445 	if (sc->vlan_attach != NULL)
446 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
447 	if (sc->vlan_detach != NULL)
448 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
449 
450 	ether_ifdetach(sc->ifp);
451 
452 	if_free(sc->ifp);
453 
454 	oce_hw_shutdown(sc);
455 
456 	bus_generic_detach(dev);
457 
458 	return 0;
459 }
460 
461 
462 static int
463 oce_shutdown(device_t dev)
464 {
465 	int rc;
466 
467 	rc = oce_detach(dev);
468 
469 	return rc;
470 }
471 
472 
473 static int
474 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
475 {
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	POCE_SOFTC sc = ifp->if_softc;
478 	struct ifi2creq i2c;
479 	uint8_t	offset = 0;
480 	int rc = 0;
481 	uint32_t u;
482 
483 	switch (command) {
484 
485 	case SIOCGIFMEDIA:
486 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
487 		break;
488 
489 	case SIOCSIFMTU:
490 		if (ifr->ifr_mtu > OCE_MAX_MTU)
491 			rc = EINVAL;
492 		else
493 			ifp->if_mtu = ifr->ifr_mtu;
494 		break;
495 
496 	case SIOCSIFFLAGS:
497 		if (ifp->if_flags & IFF_UP) {
498 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
499 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
500 				oce_init(sc);
501 			}
502 			device_printf(sc->dev, "Interface Up\n");
503 		} else {
504 			LOCK(&sc->dev_lock);
505 
506 			sc->ifp->if_drv_flags &=
507 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
508 			oce_if_deactivate(sc);
509 
510 			UNLOCK(&sc->dev_lock);
511 
512 			device_printf(sc->dev, "Interface Down\n");
513 		}
514 
515 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
516 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
517 				sc->promisc = TRUE;
518 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
519 			if (!oce_rxf_set_promiscuous(sc, 0))
520 				sc->promisc = FALSE;
521 		}
522 
523 		break;
524 
525 	case SIOCADDMULTI:
526 	case SIOCDELMULTI:
527 		rc = oce_hw_update_multicast(sc);
528 		if (rc)
529 			device_printf(sc->dev,
530 				"Update multicast address failed\n");
531 		break;
532 
533 	case SIOCSIFCAP:
534 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
535 
536 		if (u & IFCAP_TXCSUM) {
537 			ifp->if_capenable ^= IFCAP_TXCSUM;
538 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
539 
540 			if (IFCAP_TSO & ifp->if_capenable &&
541 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
542 				ifp->if_capenable &= ~IFCAP_TSO;
543 				ifp->if_hwassist &= ~CSUM_TSO;
544 				if_printf(ifp,
545 					 "TSO disabled due to -txcsum.\n");
546 			}
547 		}
548 
549 		if (u & IFCAP_RXCSUM)
550 			ifp->if_capenable ^= IFCAP_RXCSUM;
551 
552 		if (u & IFCAP_TSO4) {
553 			ifp->if_capenable ^= IFCAP_TSO4;
554 
555 			if (IFCAP_TSO & ifp->if_capenable) {
556 				if (IFCAP_TXCSUM & ifp->if_capenable)
557 					ifp->if_hwassist |= CSUM_TSO;
558 				else {
559 					ifp->if_capenable &= ~IFCAP_TSO;
560 					ifp->if_hwassist &= ~CSUM_TSO;
561 					if_printf(ifp,
562 					    "Enable txcsum first.\n");
563 					rc = EAGAIN;
564 				}
565 			} else
566 				ifp->if_hwassist &= ~CSUM_TSO;
567 		}
568 
569 		if (u & IFCAP_VLAN_HWTAGGING)
570 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
571 
572 		if (u & IFCAP_VLAN_HWFILTER) {
573 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
574 			oce_vid_config(sc);
575 		}
576 #if defined(INET6) || defined(INET)
577 		if (u & IFCAP_LRO) {
578 			ifp->if_capenable ^= IFCAP_LRO;
579 			if(sc->enable_hwlro) {
580 				if(ifp->if_capenable & IFCAP_LRO) {
581 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
582 				}else {
583 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
584 				}
585 			}
586 		}
587 #endif
588 
589 		break;
590 
591 	case SIOCGI2C:
592 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
593 		if (rc)
594 			break;
595 
596 		if (i2c.dev_addr != PAGE_NUM_A0 &&
597 		    i2c.dev_addr != PAGE_NUM_A2) {
598 			rc = EINVAL;
599 			break;
600 		}
601 
602 		if (i2c.len > sizeof(i2c.data)) {
603 			rc = EINVAL;
604 			break;
605 		}
606 
607 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
608 		if(rc) {
609 			rc = -rc;
610 			break;
611 		}
612 
613 		if (i2c.dev_addr == PAGE_NUM_A0)
614 			offset = i2c.offset;
615 		else
616 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
617 
618 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
619 
620 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
621 		break;
622 
623 	case SIOCGPRIVATE_0:
624 		rc = oce_handle_passthrough(ifp, data);
625 		break;
626 	default:
627 		rc = ether_ioctl(ifp, command, data);
628 		break;
629 	}
630 
631 	return rc;
632 }
633 
634 
635 static void
636 oce_init(void *arg)
637 {
638 	POCE_SOFTC sc = arg;
639 
640 	LOCK(&sc->dev_lock);
641 
642 	if (sc->ifp->if_flags & IFF_UP) {
643 		oce_if_deactivate(sc);
644 		oce_if_activate(sc);
645 	}
646 
647 	UNLOCK(&sc->dev_lock);
648 
649 }
650 
651 
652 static int
653 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
654 {
655 	POCE_SOFTC sc = ifp->if_softc;
656 	struct oce_wq *wq = NULL;
657 	int queue_index = 0;
658 	int status = 0;
659 
660 	if (!sc->link_status)
661 		return ENXIO;
662 
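	/* Spread flows across WQs using the stack-supplied RSS hash, when present. */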
663 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
664 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
665 
666 	wq = sc->wq[queue_index];
667 
668 	LOCK(&wq->tx_lock);
669 	status = oce_multiq_transmit(ifp, m, wq);
670 	UNLOCK(&wq->tx_lock);
671 
672 	return status;
673 
674 }
675 
676 
677 static void
678 oce_multiq_flush(struct ifnet *ifp)
679 {
680 	POCE_SOFTC sc = ifp->if_softc;
681 	struct mbuf     *m;
682 	int i = 0;
683 
684 	for (i = 0; i < sc->nwqs; i++) {
685 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
686 			m_freem(m);
687 	}
688 	if_qflush(ifp);
689 }
690 
691 
692 
693 /*****************************************************************************
694  *                   Driver interrupt routines functions                     *
695  *****************************************************************************/
696 
697 static void
698 oce_intr(void *arg, int pending)
699 {
700 
701 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
702 	POCE_SOFTC sc = ii->sc;
703 	struct oce_eq *eq = ii->eq;
704 	struct oce_eqe *eqe;
705 	struct oce_cq *cq = NULL;
706 	int i, num_eqes = 0;
707 
708 
709 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
710 				 BUS_DMASYNC_POSTWRITE);
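	/* Count and clear every pending event-queue entry before servicing the CQs. */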
711 	do {
712 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
713 		if (eqe->evnt == 0)
714 			break;
715 		eqe->evnt = 0;
716 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
717 					BUS_DMASYNC_POSTWRITE);
718 		RING_GET(eq->ring, 1);
719 		num_eqes++;
720 
721 	} while (TRUE);
722 
723 	if (!num_eqes)
724 		goto eq_arm; /* Spurious */
725 
726 	/* Clear EQ entries, but don't arm */
727 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
728 
729 	/* Process TX, RX and MCC, but don't arm the CQ yet. */
730 	for (i = 0; i < eq->cq_valid; i++) {
731 		cq = eq->cq[i];
732 		(*cq->cq_handler)(cq->cb_arg);
733 	}
734 
735 	/* Arm all cqs connected to this EQ */
736 	for (i = 0; i < eq->cq_valid; i++) {
737 		cq = eq->cq[i];
738 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
739 	}
740 
741 eq_arm:
742 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
743 
744 	return;
745 }
746 
747 
748 static int
749 oce_setup_intr(POCE_SOFTC sc)
750 {
751 	int rc = 0, use_intx = 0;
752 	int vector = 0, req_vectors = 0;
753 	int tot_req_vectors, tot_vectors;
754 
755 	if (is_rss_enabled(sc))
756 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
757 	else
758 		req_vectors = 1;
759 
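	/*
	 * With RoCE support, request OCE_RDMA_VECTORS extra MSI-X vectors on
	 * top of the NIC's; if the allocation below comes back short, the
	 * granted vectors are split between the NIC and RoCE.
	 */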
760 	tot_req_vectors = req_vectors;
761 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
762 	  if (req_vectors > 1) {
763 	    tot_req_vectors += OCE_RDMA_VECTORS;
764 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
765 	  }
766 	}
767 
768         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
769 		sc->intr_count = req_vectors;
770                 tot_vectors = tot_req_vectors;
771 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
772 		if (rc != 0) {
773 			use_intx = 1;
774 			pci_release_msi(sc->dev);
775 		} else {
776 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
777 		    if (tot_vectors < tot_req_vectors) {
778 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
779 			sc->roce_intr_count = (tot_vectors / 2);
780 		      }
781 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
782 		    }
783 		  } else {
784 		    sc->intr_count = tot_vectors;
785 		  }
786     		  sc->flags |= OCE_FLAGS_USING_MSIX;
787 		}
788 	} else
789 		use_intx = 1;
790 
791 	if (use_intx)
792 		sc->intr_count = 1;
793 
794 	/* Scale the number of queues based on the interrupt vectors we got */
795 	update_queues_got(sc);
796 
797 	if (use_intx) {
798 		device_printf(sc->dev, "Using legacy interrupt\n");
799 		rc = oce_alloc_intr(sc, vector, oce_intr);
800 		if (rc)
801 			goto error;
802 	} else {
803 		for (; vector < sc->intr_count; vector++) {
804 			rc = oce_alloc_intr(sc, vector, oce_intr);
805 			if (rc)
806 				goto error;
807 		}
808 	}
809 
810 	return 0;
811 error:
812 	oce_intr_free(sc);
813 	return rc;
814 }
815 
816 
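/*
 * Fast (filter) interrupt handler: runs in primary interrupt context, so it
 * only acknowledges the EQ (without re-arming it) and defers all completion
 * processing to the per-vector taskqueue, which runs oce_intr().
 */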
817 static int
818 oce_fast_isr(void *arg)
819 {
820 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
821 	POCE_SOFTC sc = ii->sc;
822 
823 	if (ii->eq == NULL)
824 		return FILTER_STRAY;
825 
826 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
827 
828 	taskqueue_enqueue(ii->tq, &ii->task);
829 
830  	ii->eq->intr++;
831 
832 	return FILTER_HANDLED;
833 }
834 
835 
836 static int
837 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
838 {
839 	POCE_INTR_INFO ii;
840 	int rc = 0, rr;
841 
842 	if (vector >= OCE_MAX_EQ)
843 		return (EINVAL);
844 
845 	ii = &sc->intrs[vector];
846 
847 	/* Set the resource id (rid) for the interrupt:
848 	 * MSI-X uses vector + 1 as the rid,
849 	 * INTx uses rid 0.
850 	 */
851 	if (sc->flags & OCE_FLAGS_USING_MSIX)
852 		rr = vector + 1;
853 	else
854 		rr = 0;
855 	ii->intr_res = bus_alloc_resource_any(sc->dev,
856 					      SYS_RES_IRQ,
857 					      &rr, RF_ACTIVE|RF_SHAREABLE);
858 	ii->irq_rr = rr;
859 	if (ii->intr_res == NULL) {
860 		device_printf(sc->dev,
861 			  "Could not allocate interrupt\n");
862 		rc = ENXIO;
863 		return rc;
864 	}
865 
866 	TASK_INIT(&ii->task, 0, isr, ii);
867 	ii->vector = vector;
868 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
869 	ii->tq = taskqueue_create_fast(ii->task_name,
870 			M_NOWAIT,
871 			taskqueue_thread_enqueue,
872 			&ii->tq);
873 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
874 			device_get_nameunit(sc->dev));
875 
876 	ii->sc = sc;
877 	rc = bus_setup_intr(sc->dev,
878 			ii->intr_res,
879 			INTR_TYPE_NET,
880 			oce_fast_isr, NULL, ii, &ii->tag);
881 	return rc;
882 
883 }
884 
885 
886 void
887 oce_intr_free(POCE_SOFTC sc)
888 {
889 	int i = 0;
890 
891 	for (i = 0; i < sc->intr_count; i++) {
892 
893 		if (sc->intrs[i].tag != NULL)
894 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
895 						sc->intrs[i].tag);
896 		if (sc->intrs[i].tq != NULL)
897 			taskqueue_free(sc->intrs[i].tq);
898 
899 		if (sc->intrs[i].intr_res != NULL)
900 			bus_release_resource(sc->dev, SYS_RES_IRQ,
901 						sc->intrs[i].irq_rr,
902 						sc->intrs[i].intr_res);
903 		sc->intrs[i].tag = NULL;
904 		sc->intrs[i].intr_res = NULL;
905 	}
906 
907 	if (sc->flags & OCE_FLAGS_USING_MSIX)
908 		pci_release_msi(sc->dev);
909 
910 }
911 
912 
913 
914 /******************************************************************************
915 *			  Media callbacks functions 			      *
916 ******************************************************************************/
917 
918 static void
919 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
920 {
921 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
922 
923 
924 	req->ifm_status = IFM_AVALID;
925 	req->ifm_active = IFM_ETHER;
926 
927 	if (sc->link_status == 1)
928 		req->ifm_status |= IFM_ACTIVE;
929 	else
930 		return;
931 
932 	switch (sc->link_speed) {
933 	case 1: /* 10 Mbps */
934 		req->ifm_active |= IFM_10_T | IFM_FDX;
935 		sc->speed = 10;
936 		break;
937 	case 2: /* 100 Mbps */
938 		req->ifm_active |= IFM_100_TX | IFM_FDX;
939 		sc->speed = 100;
940 		break;
941 	case 3: /* 1 Gbps */
942 		req->ifm_active |= IFM_1000_T | IFM_FDX;
943 		sc->speed = 1000;
944 		break;
945 	case 4: /* 10 Gbps */
946 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
947 		sc->speed = 10000;
948 		break;
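	/*
	 * 20G and 25G links are reported with the nearest available media
	 * type (10G SR); sc->speed still records the true link speed.
	 */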
949 	case 5: /* 20 Gbps */
950 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
951 		sc->speed = 20000;
952 		break;
953 	case 6: /* 25 Gbps */
954 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
955 		sc->speed = 25000;
956 		break;
957 	case 7: /* 40 Gbps */
958 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
959 		sc->speed = 40000;
960 		break;
961 	default:
962 		sc->speed = 0;
963 		break;
964 	}
965 
966 	return;
967 }
968 
969 
970 int
971 oce_media_change(struct ifnet *ifp)
972 {
973 	return 0;
974 }
975 
976 
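/*
 * OS-to-BMC filtering: decide whether a duplicate of this TX packet must
 * also be delivered to the board management controller, based on the
 * firmware-configured filters; if so, m_dup() a copy for the extra send.
 */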
977 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
978 				struct mbuf *m, boolean_t *os2bmc,
979 				struct mbuf **m_new)
980 {
981 	struct ether_header *eh = NULL;
982 
983 	eh = mtod(m, struct ether_header *);
984 
985 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
986 		*os2bmc = FALSE;
987 		goto done;
988 	}
989 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
990 		goto done;
991 
992 	if (is_mc_allowed_on_bmc(sc, eh) ||
993 	    is_bc_allowed_on_bmc(sc, eh) ||
994 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
995 		*os2bmc = TRUE;
996 		goto done;
997 	}
998 
999 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
1000 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1001 		uint8_t nexthdr = ip6->ip6_nxt;
1002 		if (nexthdr == IPPROTO_ICMPV6) {
1003 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
1004 			switch (icmp6->icmp6_type) {
1005 			case ND_ROUTER_ADVERT:
1006 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
1007 				goto done;
1008 			case ND_NEIGHBOR_ADVERT:
1009 				*os2bmc = is_ipv6_na_filt_enabled(sc);
1010 				goto done;
1011 			default:
1012 				break;
1013 			}
1014 		}
1015 	}
1016 
1017 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
1018 		struct ip *ip = mtod(m, struct ip *);
1019 		int iphlen = ip->ip_hl << 2;
1020 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
1021 		switch (uh->uh_dport) {
1022 		case DHCP_CLIENT_PORT:
1023 			*os2bmc = is_dhcp_client_filt_enabled(sc);
1024 			goto done;
1025 		case DHCP_SERVER_PORT:
1026 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1027 			goto done;
1028 		case NET_BIOS_PORT1:
1029 		case NET_BIOS_PORT2:
1030 			*os2bmc = is_nbios_filt_enabled(sc);
1031 			goto done;
1032 		case DHCPV6_RAS_PORT:
1033 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1034 			goto done;
1035 		default:
1036 			break;
1037 		}
1038 	}
1039 done:
1040 	if (*os2bmc) {
1041 		*m_new = m_dup(m, M_NOWAIT);
1042 		if (!*m_new) {
1043 			*os2bmc = FALSE;
1044 			return;
1045 		}
1046 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1047 	}
1048 }
1049 
1050 
1051 
1052 /*****************************************************************************
1053  *			  Transmit routines functions			     *
1054  *****************************************************************************/
1055 
1056 static int
1057 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1058 {
1059 	int rc = 0, i, retry_cnt = 0;
1060 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1061 	struct mbuf *m, *m_temp, *m_new = NULL;
1062 	struct oce_wq *wq = sc->wq[wq_index];
1063 	struct oce_packet_desc *pd;
1064 	struct oce_nic_hdr_wqe *nichdr;
1065 	struct oce_nic_frag_wqe *nicfrag;
1066 	struct ether_header *eh = NULL;
1067 	int num_wqes;
1068 	uint32_t reg_value;
1069 	boolean_t complete = TRUE;
1070 	boolean_t os2bmc = FALSE;
1071 
1072 	m = *mpp;
1073 	if (!m)
1074 		return EINVAL;
1075 
1076 	if (!(m->m_flags & M_PKTHDR)) {
1077 		rc = ENXIO;
1078 		goto free_ret;
1079 	}
1080 
1081 	/* Don't allow non-TSO packets longer than MTU */
1082 	if (!is_tso_pkt(m)) {
1083 		eh = mtod(m, struct ether_header *);
1084 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1085 			 goto free_ret;
1086 	}
1087 
1088 	if(oce_tx_asic_stall_verify(sc, m)) {
1089 		m = oce_insert_vlan_tag(sc, m, &complete);
1090 		if(!m) {
1091 			device_printf(sc->dev, "Insertion unsuccessful\n");
1092 			return 0;
1093 		}
1094 
1095 	}
1096 
1097 	/* The Lancer and SH ASICs have a bug whereby packets of 32 bytes or
1098 	 * less may cause a transmit stall on that port. The work-around is to
1099 	 * pad such short packets (<= 32 bytes) out to a 36-byte length.
1100 	 */
1101 	if(IS_SH(sc) || IS_XE201(sc) ) {
1102 		if(m->m_pkthdr.len <= 32) {
1103 			char buf[36];
1104 			bzero((void *)buf, 36);
1105 			m_append(m, (36 - m->m_pkthdr.len), buf);
1106 		}
1107 	}
1108 
1109 tx_start:
1110 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1111 		/* consolidate packet buffers for TSO/LSO segment offload */
1112 #if defined(INET6) || defined(INET)
1113 		m = oce_tso_setup(sc, mpp);
1114 #else
1115 		m = NULL;
1116 #endif
1117 		if (m == NULL) {
1118 			rc = ENXIO;
1119 			goto free_ret;
1120 		}
1121 	}
1122 
1123 
1124 	pd = &wq->pckts[wq->pkt_desc_head];
1125 
1126 retry:
1127 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1128 				     pd->map,
1129 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1130 	if (rc == 0) {
1131 		num_wqes = pd->nsegs + 1;
1132 		if (IS_BE(sc) || IS_SH(sc)) {
1133 			/* BE3/SH require an even number of WQEs, so post a dummy if needed. */
1134 			if (num_wqes & 1)
1135 				num_wqes++;
1136 		}
1137 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1138 			bus_dmamap_unload(wq->tag, pd->map);
1139 			return EBUSY;
1140 		}
1141 		atomic_store_rel_int(&wq->pkt_desc_head,
1142 				     (wq->pkt_desc_head + 1) % \
1143 				      OCE_WQ_PACKET_ARRAY_SIZE);
1144 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1145 		pd->mbuf = m;
1146 
1147 		nichdr =
1148 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1149 		nichdr->u0.dw[0] = 0;
1150 		nichdr->u0.dw[1] = 0;
1151 		nichdr->u0.dw[2] = 0;
1152 		nichdr->u0.dw[3] = 0;
1153 
1154 		nichdr->u0.s.complete = complete;
1155 		nichdr->u0.s.mgmt = os2bmc;
1156 		nichdr->u0.s.event = 1;
1157 		nichdr->u0.s.crc = 1;
1158 		nichdr->u0.s.forward = 0;
1159 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1160 		nichdr->u0.s.udpcs =
1161 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1162 		nichdr->u0.s.tcpcs =
1163 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1164 		nichdr->u0.s.num_wqe = num_wqes;
1165 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1166 
1167 		if (m->m_flags & M_VLANTAG) {
1168 			nichdr->u0.s.vlan = 1; /* VLAN present */
1169 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1170 		}
1171 
1172 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1173 			if (m->m_pkthdr.tso_segsz) {
1174 				nichdr->u0.s.lso = 1;
1175 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1176 			}
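			/* XXX: this condition is always true (no ASIC is both BE and SH); "&&" was probably intended. */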
1177 			if (!IS_BE(sc) || !IS_SH(sc))
1178 				nichdr->u0.s.ipcs = 1;
1179 		}
1180 
1181 		RING_PUT(wq->ring, 1);
1182 		atomic_add_int(&wq->ring->num_used, 1);
1183 
1184 		for (i = 0; i < pd->nsegs; i++) {
1185 			nicfrag =
1186 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1187 						      struct oce_nic_frag_wqe);
1188 			nicfrag->u0.s.rsvd0 = 0;
1189 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1190 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1191 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1192 			pd->wqe_idx = wq->ring->pidx;
1193 			RING_PUT(wq->ring, 1);
1194 			atomic_add_int(&wq->ring->num_used, 1);
1195 		}
1196 		if (num_wqes > (pd->nsegs + 1)) {
1197 			nicfrag =
1198 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1199 						      struct oce_nic_frag_wqe);
1200 			nicfrag->u0.dw[0] = 0;
1201 			nicfrag->u0.dw[1] = 0;
1202 			nicfrag->u0.dw[2] = 0;
1203 			nicfrag->u0.dw[3] = 0;
1204 			pd->wqe_idx = wq->ring->pidx;
1205 			RING_PUT(wq->ring, 1);
1206 			atomic_add_int(&wq->ring->num_used, 1);
1207 			pd->nsegs++;
1208 		}
1209 
1210 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1211 		wq->tx_stats.tx_reqs++;
1212 		wq->tx_stats.tx_wrbs += num_wqes;
1213 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1214 		wq->tx_stats.tx_pkts++;
1215 
1216 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1217 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
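		/* TX doorbell format: WQE count posted in bits 31:16, WQ id in the low bits. */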
1218 		reg_value = (num_wqes << 16) | wq->wq_id;
1219 
1220 		/* If os2bmc is not enabled, or the packet is already tagged
1221 		 * for the BMC, oce_is_pkt_dest_bmc() leaves os2bmc FALSE.
1222 		 */
1223 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1224 
1225 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1226 
1227 	} else if (rc == EFBIG)	{
1228 		if (retry_cnt == 0) {
1229 			m_temp = m_defrag(m, M_NOWAIT);
1230 			if (m_temp == NULL)
1231 				goto free_ret;
1232 			m = m_temp;
1233 			*mpp = m_temp;
1234 			retry_cnt = retry_cnt + 1;
1235 			goto retry;
1236 		} else
1237 			goto free_ret;
1238 	} else if (rc == ENOMEM)
1239 		return rc;
1240 	else
1241 		goto free_ret;
1242 
1243 	if (os2bmc) {
1244 		m = m_new;
1245 		goto tx_start;
1246 	}
1247 
1248 	return 0;
1249 
1250 free_ret:
1251 	m_freem(*mpp);
1252 	*mpp = NULL;
1253 	return rc;
1254 }
1255 
1256 
1257 static void
1258 oce_process_tx_completion(struct oce_wq *wq)
1259 {
1260 	struct oce_packet_desc *pd;
1261 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1262 	struct mbuf *m;
1263 
1264 	pd = &wq->pckts[wq->pkt_desc_tail];
1265 	atomic_store_rel_int(&wq->pkt_desc_tail,
1266 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1267 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1268 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1269 	bus_dmamap_unload(wq->tag, pd->map);
1270 
1271 	m = pd->mbuf;
1272 	m_freem(m);
1273 	pd->mbuf = NULL;
1274 
1275 
1276 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1277 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1278 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1279 			oce_tx_restart(sc, wq);
1280 		}
1281 	}
1282 }
1283 
1284 
1285 static void
1286 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1287 {
1288 
1289 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1290 		return;
1291 
1292 #if __FreeBSD_version >= 800000
1293 	if (!drbr_empty(sc->ifp, wq->br))
1294 #else
1295 	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1296 #endif
1297 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1298 
1299 }
1300 
1301 
1302 #if defined(INET6) || defined(INET)
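/*
 * For TSO the controller needs the Ethernet, IP and TCP headers in one
 * contiguous buffer: duplicate the chain if it is not writable, then
 * m_pullup() through the end of the TCP header.
 */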
1303 static struct mbuf *
1304 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1305 {
1306 	struct mbuf *m;
1307 #ifdef INET
1308 	struct ip *ip;
1309 #endif
1310 #ifdef INET6
1311 	struct ip6_hdr *ip6;
1312 #endif
1313 	struct ether_vlan_header *eh;
1314 	struct tcphdr *th;
1315 	uint16_t etype;
1316 	int total_len = 0, ehdrlen = 0;
1317 
1318 	m = *mpp;
1319 
1320 	if (M_WRITABLE(m) == 0) {
1321 		m = m_dup(*mpp, M_NOWAIT);
1322 		if (!m)
1323 			return NULL;
1324 		m_freem(*mpp);
1325 		*mpp = m;
1326 	}
1327 
1328 	eh = mtod(m, struct ether_vlan_header *);
1329 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1330 		etype = ntohs(eh->evl_proto);
1331 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1332 	} else {
1333 		etype = ntohs(eh->evl_encap_proto);
1334 		ehdrlen = ETHER_HDR_LEN;
1335 	}
1336 
1337 	switch (etype) {
1338 #ifdef INET
1339 	case ETHERTYPE_IP:
1340 		ip = (struct ip *)(m->m_data + ehdrlen);
1341 		if (ip->ip_p != IPPROTO_TCP)
1342 			return NULL;
1343 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1344 
1345 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1346 		break;
1347 #endif
1348 #ifdef INET6
1349 	case ETHERTYPE_IPV6:
1350 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1351 		if (ip6->ip6_nxt != IPPROTO_TCP)
1352 			return NULL;
1353 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1354 
1355 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1356 		break;
1357 #endif
1358 	default:
1359 		return NULL;
1360 	}
1361 
1362 	m = m_pullup(m, total_len);
1363 	if (!m)
1364 		return NULL;
1365 	*mpp = m;
1366 	return m;
1367 
1368 }
1369 #endif /* INET6 || INET */
1370 
1371 void
1372 oce_tx_task(void *arg, int npending)
1373 {
1374 	struct oce_wq *wq = arg;
1375 	POCE_SOFTC sc = wq->parent;
1376 	struct ifnet *ifp = sc->ifp;
1377 	int rc = 0;
1378 
1379 #if __FreeBSD_version >= 800000
1380 	LOCK(&wq->tx_lock);
1381 	rc = oce_multiq_transmit(ifp, NULL, wq);
1382 	if (rc) {
1383 		device_printf(sc->dev,
1384 				"TX[%d] restart failed\n", wq->queue_index);
1385 	}
1386 	UNLOCK(&wq->tx_lock);
1387 #else
1388 	oce_start(ifp);
1389 #endif
1390 
1391 }
1392 
1393 
1394 void
1395 oce_start(struct ifnet *ifp)
1396 {
1397 	POCE_SOFTC sc = ifp->if_softc;
1398 	struct mbuf *m;
1399 	int rc = 0;
1400 	int def_q = 0; /* Default TX queue is 0 */
1401 
1402 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1403 			IFF_DRV_RUNNING)
1404 		return;
1405 
1406 	if (!sc->link_status)
1407 		return;
1408 
1409 	do {
1410 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1411 		if (m == NULL)
1412 			break;
1413 
1414 		LOCK(&sc->wq[def_q]->tx_lock);
1415 		rc = oce_tx(sc, &m, def_q);
1416 		UNLOCK(&sc->wq[def_q]->tx_lock);
1417 		if (rc) {
1418 			if (m != NULL) {
1419 				sc->wq[def_q]->tx_stats.tx_stops ++;
1420 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1421 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1422 				m = NULL;
1423 			}
1424 			break;
1425 		}
1426 		if (m != NULL)
1427 			ETHER_BPF_MTAP(ifp, m);
1428 
1429 	} while (TRUE);
1430 
1431 	return;
1432 }
1433 
1434 
1435 /* Handle the Completion Queue for transmit */
1436 uint16_t
1437 oce_wq_handler(void *arg)
1438 {
1439 	struct oce_wq *wq = (struct oce_wq *)arg;
1440 	POCE_SOFTC sc = wq->parent;
1441 	struct oce_cq *cq = wq->cq;
1442 	struct oce_nic_tx_cqe *cqe;
1443 	int num_cqes = 0;
1444 
1445 	LOCK(&wq->tx_compl_lock);
1446 	bus_dmamap_sync(cq->ring->dma.tag,
1447 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1448 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1449 	while (cqe->u0.dw[3]) {
1450 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1451 
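		/* The CQE carries the index of the packet's last WQE; advance cidx just past it. */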
1452 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1453 		if (wq->ring->cidx >= wq->ring->num_items)
1454 			wq->ring->cidx -= wq->ring->num_items;
1455 
1456 		oce_process_tx_completion(wq);
1457 		wq->tx_stats.tx_compl++;
1458 		cqe->u0.dw[3] = 0;
1459 		RING_GET(cq->ring, 1);
1460 		bus_dmamap_sync(cq->ring->dma.tag,
1461 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1462 		cqe =
1463 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1464 		num_cqes++;
1465 	}
1466 
1467 	if (num_cqes)
1468 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1469 
1470 	UNLOCK(&wq->tx_compl_lock);
1471 	return num_cqes;
1472 }
1473 
1474 
1475 static int
1476 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1477 {
1478 	POCE_SOFTC sc = ifp->if_softc;
1479 	int status = 0, queue_index = 0;
1480 	struct mbuf *next = NULL;
1481 	struct buf_ring *br = NULL;
1482 
1483 	br  = wq->br;
1484 	queue_index = wq->queue_index;
1485 
1486 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1487 		IFF_DRV_RUNNING) {
1488 		if (m != NULL)
1489 			status = drbr_enqueue(ifp, br, m);
1490 		return status;
1491 	}
1492 
1493 	if (m != NULL) {
1494 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1495 			return status;
1496 	}
1497 	while ((next = drbr_peek(ifp, br)) != NULL) {
1498 		if (oce_tx(sc, &next, queue_index)) {
1499 			if (next == NULL) {
1500 				drbr_advance(ifp, br);
1501 			} else {
1502 				drbr_putback(ifp, br, next);
1503 				wq->tx_stats.tx_stops ++;
1504 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1505 			}
1506 			break;
1507 		}
1508 		drbr_advance(ifp, br);
1509 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1510 		if (next->m_flags & M_MCAST)
1511 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1512 		ETHER_BPF_MTAP(ifp, next);
1513 	}
1514 
1515 	return 0;
1516 }
1517 
1518 
1519 
1520 
1521 /*****************************************************************************
1522  *			    Receive  routines functions 		     *
1523  *****************************************************************************/
1524 
1525 static void
1526 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1527 {
1528 	uint32_t *p;
1529         struct ether_header *eh = NULL;
1530         struct tcphdr *tcp_hdr = NULL;
1531         struct ip *ip4_hdr = NULL;
1532         struct ip6_hdr *ip6 = NULL;
1533         uint32_t payload_len = 0;
1534 
1535         eh = mtod(m, struct ether_header *);
1536         /* correct IP header */
1537         if(!cqe2->ipv6_frame) {
1538 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1539                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1540                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1541                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1542         }else {
1543         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1544                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1545                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1546                                                 - sizeof(struct ip6_hdr);
1547                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1548                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1549         }
1550 
1551         /* correct tcp header */
1552         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1553         if(cqe2->push) {
1554         	tcp_hdr->th_flags |= TH_PUSH;
1555         }
1556         tcp_hdr->th_win = htons(cqe2->tcp_window);
1557         tcp_hdr->th_sum = 0xffff;
1558         if(cqe2->ts_opt) {
1559                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1560                 *p = cqe1->tcp_timestamp_val;
1561                 *(p+1) = cqe1->tcp_timestamp_ecr;
1562         }
1563 
1564 	return;
1565 }
1566 
1567 static void
1568 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1569 {
1570 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1571         uint32_t i = 0, frag_len = 0;
1572 	uint32_t len = cqe_info->pkt_size;
1573         struct oce_packet_desc *pd;
1574         struct mbuf *tail = NULL;
1575 
1576         for (i = 0; i < cqe_info->num_frags; i++) {
1577                 if (rq->ring->cidx == rq->ring->pidx) {
1578                         device_printf(sc->dev,
1579                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1580                         return;
1581                 }
1582                 pd = &rq->pckts[rq->ring->cidx];
1583 
1584                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1585                 bus_dmamap_unload(rq->tag, pd->map);
1586 		RING_GET(rq->ring, 1);
1587                 rq->pending--;
1588 
1589                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1590                 pd->mbuf->m_len = frag_len;
1591 
1592                 if (tail != NULL) {
1593                         /* additional fragments */
1594                         pd->mbuf->m_flags &= ~M_PKTHDR;
1595                         tail->m_next = pd->mbuf;
1596 			if(rq->islro)
1597                         	tail->m_nextpkt = NULL;
1598                         tail = pd->mbuf;
1599                 } else {
1600                         /* first fragment, fill out much of the packet header */
1601                         pd->mbuf->m_pkthdr.len = len;
1602 			if(rq->islro)
1603                         	pd->mbuf->m_nextpkt = NULL;
1604                         pd->mbuf->m_pkthdr.csum_flags = 0;
1605                         if (IF_CSUM_ENABLED(sc)) {
1606                                 if (cqe_info->l4_cksum_pass) {
1607                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1608                                                 pd->mbuf->m_pkthdr.csum_flags |=
1609                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1610                                         }else { /* IPV6 frame */
1611 						if(rq->islro) {
1612                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1613                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1614 						}
1615                                         }
1616                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1617                                 }
1618                                 if (cqe_info->ip_cksum_pass) {
1619                                         pd->mbuf->m_pkthdr.csum_flags |=
1620                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1621                                 }
1622                         }
1623                         *m = tail = pd->mbuf;
1624                }
1625                 pd->mbuf = NULL;
1626                 len -= frag_len;
1627         }
1628 
1629         return;
1630 }
1631 
1632 static void
1633 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1634 {
1635         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1636         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1637         struct mbuf *m = NULL;
1638 	struct oce_common_cqe_info cq_info;
1639 
1640 	/* parse cqe */
1641         if(cqe2 == NULL) {
1642                 cq_info.pkt_size =  cqe->pkt_size;
1643                 cq_info.vtag = cqe->vlan_tag;
1644                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1645                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1646                 cq_info.ipv6_frame = cqe->ipv6_frame;
1647                 cq_info.vtp = cqe->vtp;
1648                 cq_info.qnq = cqe->qnq;
1649         }else {
1650                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1651                 cq_info.pkt_size =  cqe2->coalesced_size;
1652                 cq_info.vtag = cqe2->vlan_tag;
1653                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1654                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1655                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1656                 cq_info.vtp = cqe2->vtp;
1657                 cq_info.qnq = cqe1->qnq;
1658         }
1659 
1660 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1661 
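        /* Ceiling division: one RX fragment per frag_size bytes of the coalesced frame. */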
1662         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1663         if(cq_info.pkt_size % rq->cfg.frag_size)
1664                 cq_info.num_frags++;
1665 
1666 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1667 
1668 	if (m) {
1669 		if(cqe2) {
1670 			//assert(cqe2->valid != 0);
1671 
1672 			//assert(cqe2->cqe_type != 2);
1673 			oce_correct_header(m, cqe1, cqe2);
1674 		}
1675 
1676 		m->m_pkthdr.rcvif = sc->ifp;
1677 #if __FreeBSD_version >= 800000
1678 		if (rq->queue_index)
1679 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1680 		else
1681 			m->m_pkthdr.flowid = rq->queue_index;
1682 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1683 #endif
1684 		/* This determines if the VLAN tag is valid */
1685 		if (cq_info.vtp) {
1686 			if (sc->function_mode & FNM_FLEX10_MODE) {
1687 				/* FLEX10. If QnQ is not set, neglect VLAN */
1688 				if (cq_info.qnq) {
1689 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1690 					m->m_flags |= M_VLANTAG;
1691 				}
1692 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1693 			/* In UMC mode the pvid is generally stripped by
1694 			 * hw, but in some cases frames still arrive with
1695 			 * it. So if pvid == vlan, ignore the vlan tag.
1696 			 */
1697 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1698 				m->m_flags |= M_VLANTAG;
1699 			}
1700 		}
1701 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1702 
1703 		(*sc->ifp->if_input) (sc->ifp, m);
1704 
1705 		/* Update rx stats per queue */
1706 		rq->rx_stats.rx_pkts++;
1707 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1708 		rq->rx_stats.rx_frags += cq_info.num_frags;
1709 		rq->rx_stats.rx_ucast_pkts++;
1710 	}
1711         return;
1712 }
1713 
1714 static void
1715 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1716 {
1717 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1718 	int len;
1719 	struct mbuf *m = NULL;
1720 	struct oce_common_cqe_info cq_info;
1721 	uint16_t vtag = 0;
1722 
1723 	/* Is it a flush completion that carries no data? */
1724 	if(!cqe->u0.s.num_fragments)
1725 		goto exit;
1726 
1727 	len = cqe->u0.s.pkt_size;
1728 	if (!len) {
1729 		/* Partial DMA workaround for Lancer */
1730 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1731 		goto exit;
1732 	}
1733 
1734 	if (!oce_cqe_portid_valid(sc, cqe)) {
1735 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1736 		goto exit;
1737 	}
1738 
1739 	 /* Get vlan_tag value */
1740 	if(IS_BE(sc) || IS_SH(sc))
1741 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1742 	else
1743 		vtag = cqe->u0.s.vlan_tag;
1744 
1745 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1746 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1747 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1748 	cq_info.num_frags = cqe->u0.s.num_fragments;
1749 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1750 
1751 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1752 
1753 	if (m) {
1754 		m->m_pkthdr.rcvif = sc->ifp;
1755 #if __FreeBSD_version >= 800000
1756 		if (rq->queue_index)
1757 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1758 		else
1759 			m->m_pkthdr.flowid = rq->queue_index;
1760 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1761 #endif
1762 		/* This determines if the VLAN tag is valid */
1763 		if (oce_cqe_vtp_valid(sc, cqe)) {
1764 			if (sc->function_mode & FNM_FLEX10_MODE) {
1765 				/* FLEX10. If QnQ is not set, neglect VLAN */
1766 				if (cqe->u0.s.qnq) {
1767 					m->m_pkthdr.ether_vtag = vtag;
1768 					m->m_flags |= M_VLANTAG;
1769 				}
1770 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1771 				/* In UMC mode the pvid is generally stripped by
1772 				 * hw, but in some cases frames still arrive with
1773 				 * it. So if pvid == vlan, ignore the vlan tag.
1774 				 */
1775 				m->m_pkthdr.ether_vtag = vtag;
1776 				m->m_flags |= M_VLANTAG;
1777 			}
1778 		}
1779 
1780 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1781 #if defined(INET6) || defined(INET)
1782 		/* Try to queue to LRO */
1783 		if (IF_LRO_ENABLED(sc) &&
1784 		    (cqe->u0.s.ip_cksum_pass) &&
1785 		    (cqe->u0.s.l4_cksum_pass) &&
1786 		    (!cqe->u0.s.ip_ver)       &&
1787 		    (rq->lro.lro_cnt != 0)) {
1788 
1789 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1790 				rq->lro_pkts_queued ++;
1791 				goto post_done;
1792 			}
1793 			/* If LRO posting fails then try to post to STACK */
1794 		}
1795 #endif
1796 
1797 		(*sc->ifp->if_input) (sc->ifp, m);
1798 #if defined(INET6) || defined(INET)
1799 post_done:
1800 #endif
1801 		/* Update rx stats per queue */
1802 		rq->rx_stats.rx_pkts++;
1803 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1804 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1805 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1806 			rq->rx_stats.rx_mcast_pkts++;
1807 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1808 			rq->rx_stats.rx_ucast_pkts++;
1809 	}
1810 exit:
1811 	return;
1812 }
1813 
1814 
1815 void
1816 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1817 {
1818 	uint32_t i = 0;
1819 	struct oce_packet_desc *pd;
1820 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1821 
1822 	for (i = 0; i < num_frags; i++) {
1823                 if (rq->ring->cidx == rq->ring->pidx) {
1824                         device_printf(sc->dev,
1825                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1826                         return;
1827                 }
1828                 pd = &rq->pckts[rq->ring->cidx];
1829                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1830                 bus_dmamap_unload(rq->tag, pd->map);
1831                 if (pd->mbuf != NULL) {
1832                         m_freem(pd->mbuf);
1833                         pd->mbuf = NULL;
1834                 }
1835 
1836 		RING_GET(rq->ring, 1);
1837                 rq->pending--;
1838 	}
1839 }
1840 
1841 
1842 static int
1843 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1844 {
1845 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1846 	int vtp = 0;
1847 
1848 	if (sc->be3_native) {
1849 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1850 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1851 	} else
1852 		vtp = cqe->u0.s.vlan_tag_present;
1853 
1854 	return vtp;
1855 
1856 }
1857 
1858 
1859 static int
1860 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1861 {
1862 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1863 	int port_id = 0;
1864 
1865 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1866 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1867 		port_id =  cqe_v1->u0.s.port;
1868 		if (sc->port_id != port_id)
1869 			return 0;
1870 	} else
1871 		;/* For BE3 legacy and Lancer this check is a no-op */
1872 
1873 	return 1;
1874 
1875 }
1876 
1877 #if defined(INET6) || defined(INET)
1878 void
1879 oce_rx_flush_lro(struct oce_rq *rq)
1880 {
1881 	struct lro_ctrl	*lro = &rq->lro;
1882 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1883 
1884 	if (!IF_LRO_ENABLED(sc))
1885 		return;
1886 
1887 	tcp_lro_flush_all(lro);
1888 	rq->lro_pkts_queued = 0;
1889 
1890 	return;
1891 }
1892 
1893 
1894 static int
1895 oce_init_lro(POCE_SOFTC sc)
1896 {
1897 	struct lro_ctrl *lro = NULL;
1898 	int i = 0, rc = 0;
1899 
1900 	for (i = 0; i < sc->nrqs; i++) {
1901 		lro = &sc->rq[i]->lro;
1902 		rc = tcp_lro_init(lro);
1903 		if (rc != 0) {
1904 			device_printf(sc->dev, "LRO init failed\n");
1905 			return rc;
1906 		}
1907 		lro->ifp = sc->ifp;
1908 	}
1909 
1910 	return rc;
1911 }
1912 
1913 
1914 void
1915 oce_free_lro(POCE_SOFTC sc)
1916 {
1917 	struct lro_ctrl *lro = NULL;
1918 	int i = 0;
1919 
1920 	for (i = 0; i < sc->nrqs; i++) {
1921 		lro = &sc->rq[i]->lro;
1922 		if (lro)
1923 			tcp_lro_free(lro);
1924 	}
1925 }
1926 #endif
1927 
1928 int
1929 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1930 {
1931 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1932 	int i, in, rc;
1933 	struct oce_packet_desc *pd;
1934 	bus_dma_segment_t segs[6];
1935 	int nsegs, added = 0;
1936 	struct oce_nic_rqe *rqe;
1937 	pd_rxulp_db_t rxdb_reg;
1938 	uint32_t val = 0;
1939 	uint32_t oce_max_rq_posts = 64;
1940 
1941 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1942 	for (i = 0; i < count; i++) {
1943 		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1944 
1945 		pd = &rq->pckts[rq->ring->pidx];
1946 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1947 		if (pd->mbuf == NULL) {
1948 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1949 			break;
1950 		}
1951 		pd->mbuf->m_nextpkt = NULL;
1952 
1953 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1954 
1955 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1956 					     pd->map,
1957 					     pd->mbuf,
1958 					     segs, &nsegs, BUS_DMA_NOWAIT);
1959 		if (rc) {
1960 			m_free(pd->mbuf);
1961 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1962 			break;
1963 		}
1964 
1965 		if (nsegs != 1) {
1966 			i--;
1967 			continue;
1968 		}
1969 
1970 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1971 
1972 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1973 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1974 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1975 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1976 		RING_PUT(rq->ring, 1);
1977 		added++;
1978 		rq->pending++;
1979 	}
1980 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1981 	if (added != 0) {
1982 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1983 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1984 			rxdb_reg.bits.qid = rq->rq_id;
			if (rq->islro) {
				/* compose the doorbell value afresh for each
				 * write; OR-ing into the previous value would
				 * merge stale post counts from earlier batches */
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= oce_max_rq_posts << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
			added -= oce_max_rq_posts;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			if (rq->islro) {
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= added << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
2004 		}
2005 	}
2006 
2007 	return 0;
2008 }
2009 
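/*
 * Re-arm the CQ for the completions just handled and top the RQ back
 * up: the regular path refills to one below the packet array size,
 * while the HWLRO path refills in fixed chunks of 64 once at least
 * that many slots are free.
 */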
2010 static void
2011 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
2012 {
	if (num_cqes) {
		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
		if (!sc->enable_hwlro) {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
		} else {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
				oce_alloc_rx_bufs(rq, 64);
		}
	}

	return;
2025 }
2026 
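/*
 * RX completion handler for the hardware-LRO path. The controller
 * emits either a singleton CQE describing a complete aggregate, or a
 * two-part sequence (cqe_type 0x1 followed by 0x2). Part 1 is parked
 * in rq->cqe_firstpart until its matching part 2 arrives; any other
 * ordering is reported and processing is abandoned for this pass.
 */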
2027 uint16_t
2028 oce_rq_handler_lro(void *arg)
2029 {
2030         struct oce_rq *rq = (struct oce_rq *)arg;
2031         struct oce_cq *cq = rq->cq;
2032         POCE_SOFTC sc = rq->parent;
2033         struct nic_hwlro_singleton_cqe *cqe;
2034         struct nic_hwlro_cqe_part2 *cqe2;
2035         int num_cqes = 0;
2036 
2037 	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	while (cqe->valid) {
		if (cqe->cqe_type == 0) { /* singleton cqe */
			/* we should not get a singleton cqe after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
				goto exit_rq_handler_lro;
			}
			if (cqe->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			oce_rx_lro(rq, cqe, NULL);
			rq->rx_stats.rx_compl++;
			cqe->valid = 0;
			RING_GET(cq->ring, 1);
			num_cqes++;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		} else if (cqe->cqe_type == 0x1) { /* first part */
			/* we should not get cqe1 after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1\n");
				goto exit_rq_handler_lro;
			}
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
			RING_GET(cq->ring, 1);
		} else if (cqe->cqe_type == 0x2) { /* second part */
			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
			if (cqe2->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			/* we should not get cqe2 without cqe1 */
			if (rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1\n");
				goto exit_rq_handler_lro;
			}
			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

			rq->rx_stats.rx_compl++;
			rq->cqe_firstpart->valid = 0;
			cqe2->valid = 0;
			rq->cqe_firstpart = NULL;

			RING_GET(cq->ring, 1);
			num_cqes += 2;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		}

		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	}
2093 	oce_check_rx_bufs(sc, num_cqes, rq);
2094 exit_rq_handler_lro:
2095 	UNLOCK(&rq->rx_lock);
2096 	return 0;
2097 }
2098 
2099 /* Handle the Completion Queue for receive */
2100 uint16_t
2101 oce_rq_handler(void *arg)
2102 {
2103 	struct oce_rq *rq = (struct oce_rq *)arg;
2104 	struct oce_cq *cq = rq->cq;
2105 	POCE_SOFTC sc = rq->parent;
2106 	struct oce_nic_rx_cqe *cqe;
2107 	int num_cqes = 0;
2108 
	if (rq->islro) {
2110 		oce_rq_handler_lro(arg);
2111 		return 0;
2112 	}
2113 	LOCK(&rq->rx_lock);
2114 	bus_dmamap_sync(cq->ring->dma.tag,
2115 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2116 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2117 	while (cqe->u0.dw[2]) {
2118 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2119 
2120 		if (cqe->u0.s.error == 0) {
2121 			oce_rx(rq, cqe);
2122 		} else {
2123 			rq->rx_stats.rxcp_err++;
2124 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2125 			/* Post L3/L4 errors to stack.*/
2126 			oce_rx(rq, cqe);
2127 		}
2128 		rq->rx_stats.rx_compl++;
2129 		cqe->u0.dw[2] = 0;
2130 
2131 #if defined(INET6) || defined(INET)
2132 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2133 			oce_rx_flush_lro(rq);
2134 		}
2135 #endif
2136 
2137 		RING_GET(cq->ring, 1);
2138 		bus_dmamap_sync(cq->ring->dma.tag,
2139 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2140 		cqe =
2141 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2142 		num_cqes++;
2143 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2144 			break;
2145 	}
2146 
2147 #if defined(INET6) || defined(INET)
2148         if (IF_LRO_ENABLED(sc))
2149                 oce_rx_flush_lro(rq);
2150 #endif
2151 
2152 	oce_check_rx_bufs(sc, num_cqes, rq);
2153 	UNLOCK(&rq->rx_lock);
2154 	return 0;
2155 
2156 }
2157 
2158 
2159 
2160 
2161 /*****************************************************************************
 *		   Helper functions used in this file			     *
2163  *****************************************************************************/
2164 
2165 static int
2166 oce_attach_ifp(POCE_SOFTC sc)
2167 {
2168 
2169 	sc->ifp = if_alloc(IFT_ETHER);
2170 	if (!sc->ifp)
2171 		return ENOMEM;
2172 
2173 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2174 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2175 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2176 
2177 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
2178 	sc->ifp->if_ioctl = oce_ioctl;
2179 	sc->ifp->if_start = oce_start;
2180 	sc->ifp->if_init = oce_init;
2181 	sc->ifp->if_mtu = ETHERMTU;
2182 	sc->ifp->if_softc = sc;
2183 #if __FreeBSD_version >= 800000
2184 	sc->ifp->if_transmit = oce_multiq_start;
2185 	sc->ifp->if_qflush = oce_multiq_flush;
2186 #endif
2187 
2188 	if_initname(sc->ifp,
2189 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2190 
2191 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2192 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2193 	IFQ_SET_READY(&sc->ifp->if_snd);
2194 
2195 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2196 	sc->ifp->if_hwassist |= CSUM_TSO;
2197 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2198 
2199 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2200 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2201 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2202 
2203 #if defined(INET6) || defined(INET)
2204 	sc->ifp->if_capabilities |= IFCAP_TSO;
2205 	sc->ifp->if_capabilities |= IFCAP_LRO;
2206 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2207 #endif
2208 
2209 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2210 	sc->ifp->if_baudrate = IF_Gbps(10);
2211 
2212 #if __FreeBSD_version >= 1000000
2213 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2214 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2215 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2216 #endif
2217 
2218 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2219 
2220 	return 0;
2221 }
2222 
2223 
2224 static void
2225 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2226 {
2227 	POCE_SOFTC sc = ifp->if_softc;
2228 
2229 	if (ifp->if_softc !=  arg)
2230 		return;
2231 	if ((vtag == 0) || (vtag > 4095))
2232 		return;
2233 
2234 	sc->vlan_tag[vtag] = 1;
2235 	sc->vlans_added++;
2236 	if (sc->vlans_added <= (sc->max_vlans + 1))
2237 		oce_vid_config(sc);
2238 }
2239 
2240 
2241 static void
2242 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2243 {
2244 	POCE_SOFTC sc = ifp->if_softc;
2245 
2246 	if (ifp->if_softc !=  arg)
2247 		return;
2248 	if ((vtag == 0) || (vtag > 4095))
2249 		return;
2250 
2251 	sc->vlan_tag[vtag] = 0;
2252 	sc->vlans_added--;
2253 	oce_vid_config(sc);
2254 }
2255 
2256 
2257 /*
2258  * A max of 64 vlans can be configured in BE. If the user configures
2259  * more, place the card in vlan promiscuous mode.
2260  */
2261 static int
2262 oce_vid_config(POCE_SOFTC sc)
2263 {
2264 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2265 	uint16_t ntags = 0, i;
2266 	int status = 0;
2267 
2268 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2269 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2270 		for (i = 0; i < MAX_VLANS; i++) {
2271 			if (sc->vlan_tag[i]) {
2272 				vtags[ntags].vtag = i;
2273 				ntags++;
2274 			}
2275 		}
2276 		if (ntags)
2277 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2278 						vtags, ntags, 1, 0);
2279 	} else
2280 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2281 					 	NULL, 0, 1, 1);
2282 	return status;
2283 }
2284 
2285 
2286 static void
2287 oce_mac_addr_set(POCE_SOFTC sc)
2288 {
2289 	uint32_t old_pmac_id = sc->pmac_id;
2290 	int status = 0;
2291 
2292 
2293 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2294 			 sc->macaddr.size_of_struct);
2295 	if (!status)
2296 		return;
2297 
2298 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2299 					sc->if_id, &sc->pmac_id);
2300 	if (!status) {
2301 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2302 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2303 				 sc->macaddr.size_of_struct);
2304 	}
2305 	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
2307 
2308 }
2309 
2310 
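/*
 * SIOCPRIVATE passthrough. The user buffer begins with an ioctl
 * cookie, followed by a mailbox header and payload; the request is
 * bounced through DMA memory, executed via the mailbox, and the
 * response is copied back into the same user buffer.
 */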
2311 static int
2312 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2313 {
2314 	POCE_SOFTC sc = ifp->if_softc;
2315 	struct ifreq *ifr = (struct ifreq *)data;
2316 	int rc = ENXIO;
2317 	char cookie[32] = {0};
2318 	void *priv_data = ifr_data_get_ptr(ifr);
2319 	void *ioctl_ptr;
2320 	uint32_t req_size;
2321 	struct mbx_hdr req;
2322 	OCE_DMA_MEM dma_mem;
2323 	struct mbx_common_get_cntl_attr *fw_cmd;
2324 
2325 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2326 		return EFAULT;
2327 
2328 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2329 		return EINVAL;
2330 
2331 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2332 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2333 		return EFAULT;
2334 
2335 	req_size = le32toh(req.u0.req.request_length);
2336 	if (req_size > 65536)
2337 		return EINVAL;
2338 
2339 	req_size += sizeof(struct mbx_hdr);
2340 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2341 	if (rc)
2342 		return ENOMEM;
2343 
2344 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2345 		rc = EFAULT;
2346 		goto dma_free;
2347 	}
2348 
2349 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2350 	if (rc) {
2351 		rc = EIO;
2352 		goto dma_free;
2353 	}
2354 
	/*
	 * The firmware fills in every attribute for this ioctl except the
	 * driver version, so patch it into the DMA buffer before copying
	 * the response back out (the user-space pointer must not be
	 * written through directly).
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *)OCE_DMAPTR(&dma_mem, char);
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

	if (copyout(OCE_DMAPTR(&dma_mem, char), ioctl_ptr, req_size))
		rc = EFAULT;
2367 
2368 dma_free:
2369 	oce_dma_free(sc, &dma_mem);
2370 	return rc;
2371 
2372 }
2373 
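/*
 * Adaptive interrupt coalescing, run from the 1Hz timer: derive a
 * per-EQ packet rate from the RX/TX counters and map it to an EQ
 * delay of (pps / 15000) << 2, clamped to [min_eqd, max_eqd] (values
 * below 8 are rounded down to 0). Illustrative numbers: 120000
 * pkts/sec gives eqd = (120000 / 15000) << 2 = 32, which is
 * programmed as a delay multiplier of 32 * 65 / 100 = 20.
 */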
2374 static void
2375 oce_eqd_set_periodic(POCE_SOFTC sc)
2376 {
2377 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2378 	struct oce_aic_obj *aic;
2379 	struct oce_eq *eqo;
2380 	uint64_t now = 0, delta;
2381 	int eqd, i, num = 0;
2382 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2383 	struct oce_wq *wq;
2384 	struct oce_rq *rq;
2385 
2386 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2387 
2388 	for (i = 0 ; i < sc->neqs; i++) {
2389 		eqo = sc->eq[i];
2390 		aic = &sc->aic_obj[i];
		/* Adaptive coalescing is off: use the static EQ delay set from user space. */
2392 		if (!aic->enable) {
2393 			if (aic->ticks)
2394 				aic->ticks = 0;
2395 			eqd = aic->et_eqd;
2396 			goto modify_eqd;
2397 		}
2398 
2399 		if (i == 0) {
2400 			rq = sc->rq[0];
2401 			rxpkts = rq->rx_stats.rx_pkts;
2402 		} else
2403 			rxpkts = 0;
2404 		if (i + 1 < sc->nrqs) {
2405 			rq = sc->rq[i + 1];
2406 			rxpkts += rq->rx_stats.rx_pkts;
2407 		}
2408 		if (i < sc->nwqs) {
2409 			wq = sc->wq[i];
2410 			tx_reqs = wq->tx_stats.tx_reqs;
2411 		} else
2412 			tx_reqs = 0;
2413 		now = ticks;
2414 
2415 		if (!aic->ticks || now < aic->ticks ||
2416 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2417 			aic->prev_rxpkts = rxpkts;
2418 			aic->prev_txreqs = tx_reqs;
2419 			aic->ticks = now;
2420 			continue;
2421 		}
2422 
2423 		delta = ticks_to_msecs(now - aic->ticks);
2424 
2425 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2426 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2427 		eqd = (pps / 15000) << 2;
2428 		if (eqd < 8)
2429 			eqd = 0;
2430 
2431 		/* Make sure that the eq delay is in the known range */
2432 		eqd = min(eqd, aic->max_eqd);
2433 		eqd = max(eqd, aic->min_eqd);
2434 
2435 		aic->prev_rxpkts = rxpkts;
2436 		aic->prev_txreqs = tx_reqs;
2437 		aic->ticks = now;
2438 
2439 modify_eqd:
2440 		if (eqd != aic->cur_eqd) {
2441 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2442 			set_eqd[num].eq_id = eqo->eq_id;
2443 			aic->cur_eqd = eqd;
2444 			num++;
2445 		}
2446 	}
2447 
	/* Push out the updated delays, at most eight EQs per mailbox command. */
	for (i = 0; i < num; i += 8) {
		if ((num - i) >= 8)
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
		else
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
	}
2455 
2456 }
2457 
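/*
 * Poll for unrecoverable hardware errors. Lancer (XE201) exposes
 * SLIPORT status/error registers; BE/Skyhawk instead report masked
 * Unrecoverable Error (UE) bits that are decoded through the
 * ue_status_low_desc/ue_status_hi_desc tables.
 */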
2458 static void oce_detect_hw_error(POCE_SOFTC sc)
2459 {
2460 
2461 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2462 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2463 	uint32_t i;
2464 
2465 	if (sc->hw_error)
2466 		return;
2467 
2468 	if (IS_XE201(sc)) {
2469 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2470 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2471 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2472 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2473 		}
2474 	} else {
2475 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2476 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2477 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2478 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2479 
2480 		ue_low = (ue_low & ~ue_low_mask);
2481 		ue_high = (ue_high & ~ue_high_mask);
2482 	}
2483 
	/* On certain platforms BE hardware can report spurious UEs;
	 * a genuine UE will make the hardware stop on its own, so do not
	 * set hw_error on UE detection. Only SLIPORT errors are treated
	 * as fatal here.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}
2501 
2502 	if (ue_low) {
2503 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2504 			if (ue_low & 1)
2505 				device_printf(sc->dev, "UE: %s bit set\n",
2506 							ue_status_low_desc[i]);
2507 		}
2508 	}
2509 
2510 	if (ue_high) {
2511 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2512 			if (ue_high & 1)
2513 				device_printf(sc->dev, "UE: %s bit set\n",
2514 							ue_status_hi_desc[i]);
2515 		}
2516 	}
2517 
2518 }
2519 
2520 
2521 static void
2522 oce_local_timer(void *arg)
2523 {
2524 	POCE_SOFTC sc = arg;
2525 	int i = 0;
2526 
2527 	oce_detect_hw_error(sc);
2528 	oce_refresh_nic_stats(sc);
2529 	oce_refresh_queue_stats(sc);
2530 	oce_mac_addr_set(sc);
2531 
	/* TX watchdog */
2533 	for (i = 0; i < sc->nwqs; i++)
2534 		oce_tx_restart(sc, sc->wq[i]);
2535 
2536 	/* calculate and set the eq delay for optimal interrupt rate */
2537 	if (IS_BE(sc) || IS_SH(sc))
2538 		oce_eqd_set_periodic(sc);
2539 
2540 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2541 }
2542 
2543 static void
2544 oce_tx_compl_clean(POCE_SOFTC sc)
2545 {
2546 	struct oce_wq *wq;
2547 	int i = 0, timeo = 0, num_wqes = 0;
2548 	int pending_txqs = sc->nwqs;
2549 
	/* Stop polling for completions when the hardware has been silent
	 * for 10ms, a hardware error is flagged, or no completions remain
	 * outstanding.
	 */
2553 	do {
2554 		pending_txqs = sc->nwqs;
2555 
2556 		for_all_wq_queues(sc, wq, i) {
2557 			num_wqes = oce_wq_handler(wq);
2558 
			if (num_wqes)
				timeo = 0;

			if (!wq->ring->num_used)
				pending_txqs--;
2564 		}
2565 
2566 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2567 			break;
2568 
2569 		DELAY(1000);
2570 	} while (TRUE);
2571 
2572 	for_all_wq_queues(sc, wq, i) {
		while (wq->ring->num_used) {
2574 			LOCK(&wq->tx_compl_lock);
2575 			oce_process_tx_completion(wq);
2576 			UNLOCK(&wq->tx_compl_lock);
2577 		}
2578 	}
2579 
2580 }
2581 
2582 /* NOTE : This should only be called holding
2583  *        DEVICE_LOCK.
2584  */
2585 static void
2586 oce_if_deactivate(POCE_SOFTC sc)
2587 {
2588 	int i;
2589 	struct oce_rq *rq;
2590 	struct oce_wq *wq;
2591 	struct oce_eq *eq;
2592 
2593 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2594 
2595 	oce_tx_compl_clean(sc);
2596 
2597 	/* Stop intrs and finish any bottom halves pending */
2598 	oce_hw_intr_disable(sc);
2599 
	/* Since taskqueue_drain takes the Giant lock, we should not hold
	   any other lock. So drop the device lock and reacquire it after
	   taskqueue_drain completes.
	*/
2604 	UNLOCK(&sc->dev_lock);
2605 	for (i = 0; i < sc->intr_count; i++) {
2606 		if (sc->intrs[i].tq != NULL) {
2607 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2608 		}
2609 	}
2610 	LOCK(&sc->dev_lock);
2611 
2612 	/* Delete RX queue in card with flush param */
2613 	oce_stop_rx(sc);
2614 
2615 	/* Invalidate any pending cq and eq entries*/
2616 	for_all_evnt_queues(sc, eq, i)
2617 		oce_drain_eq(eq);
2618 	for_all_rq_queues(sc, rq, i)
2619 		oce_drain_rq_cq(rq);
2620 	for_all_wq_queues(sc, wq, i)
2621 		oce_drain_wq_cq(wq);
2622 
	/* We still need to receive MCC async events,
	   so re-enable interrupts and arm the first EQ.
	*/
2626 	oce_hw_intr_enable(sc);
2627 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2628 
2629 	DELAY(10);
2630 }
2631 
2632 
2633 static void
2634 oce_if_activate(POCE_SOFTC sc)
2635 {
2636 	struct oce_eq *eq;
2637 	struct oce_rq *rq;
2638 	struct oce_wq *wq;
2639 	int i, rc = 0;
2640 
2641 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2642 
2643 	oce_hw_intr_disable(sc);
2644 
2645 	oce_start_rx(sc);
2646 
2647 	for_all_rq_queues(sc, rq, i) {
2648 		rc = oce_start_rq(rq);
2649 		if (rc)
2650 			device_printf(sc->dev, "Unable to start RX\n");
2651 	}
2652 
2653 	for_all_wq_queues(sc, wq, i) {
2654 		rc = oce_start_wq(wq);
2655 		if (rc)
2656 			device_printf(sc->dev, "Unable to start TX\n");
2657 	}
2658 
2659 
2660 	for_all_evnt_queues(sc, eq, i)
2661 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2662 
2663 	oce_hw_intr_enable(sc);
2664 
2665 }
2666 
2667 static void
2668 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2669 {
2670 	/* Update Link status */
2671 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2672 	     ASYNC_EVENT_LINK_UP) {
2673 		sc->link_status = ASYNC_EVENT_LINK_UP;
2674 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2675 	} else {
2676 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2677 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2678 	}
2679 }
2680 
2681 
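/*
 * GRP5 OS2BMC event: note that BMC management is active and record
 * which traffic classes the BMC wants filtered. bmc_filt_mask layout,
 * LSB first: ARP, DHCP client, DHCP server, NetBIOS, broadcast, IPv6
 * neighbor discovery, IPv6 RA, IPv6 RAS, multicast.
 */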
2682 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2683 					 struct oce_async_evt_grp5_os2bmc *evt)
2684 {
2685 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2686 	if (evt->u.s.mgmt_enable)
2687 		sc->flags |= OCE_FLAGS_OS2BMC;
2688 	else
2689 		return;
2690 
2691 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2692 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2693 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2694 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2695 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2696 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2697 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2698 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2699 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2700 }
2701 
2702 
2703 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2704 {
2705 	struct oce_async_event_grp5_pvid_state *gcqe;
2706 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2707 
2708 	switch (cqe->u0.s.async_type) {
2709 	case ASYNC_EVENT_PVID_STATE:
2710 		/* GRP5 PVID */
2711 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2712 		if (gcqe->enabled)
2713 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2714 		else
2715 			sc->pvid = 0;
2716 		break;
2717 	case ASYNC_EVENT_OS2BMC:
2718 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2719 		oce_async_grp5_osbmc_process(sc, bmccqe);
2720 		break;
2721 	default:
2722 		break;
2723 	}
2724 }
2725 
2726 /* Handle the Completion Queue for the Mailbox/Async notifications */
2727 uint16_t
2728 oce_mq_handler(void *arg)
2729 {
2730 	struct oce_mq *mq = (struct oce_mq *)arg;
2731 	POCE_SOFTC sc = mq->parent;
2732 	struct oce_cq *cq = mq->cq;
2733 	int num_cqes = 0, evt_type = 0, optype = 0;
2734 	struct oce_mq_cqe *cqe;
2735 	struct oce_async_cqe_link_state *acqe;
2736 	struct oce_async_event_qnq *dbgcqe;
2737 
2738 
2739 	bus_dmamap_sync(cq->ring->dma.tag,
2740 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2741 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2742 
2743 	while (cqe->u0.dw[3]) {
2744 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2745 		if (cqe->u0.s.async_event) {
2746 			evt_type = cqe->u0.s.event_type;
2747 			optype = cqe->u0.s.async_type;
2748 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2749 				/* Link status evt */
2750 				acqe = (struct oce_async_cqe_link_state *)cqe;
2751 				process_link_state(sc, acqe);
2752 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2753 				oce_process_grp5_events(sc, cqe);
2754 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2755 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2756 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
2758 					sc->qnqid = dbgcqe->vlan_tag;
2759 				sc->qnq_debug_event = TRUE;
2760 			}
2761 		}
2762 		cqe->u0.dw[3] = 0;
2763 		RING_GET(cq->ring, 1);
2764 		bus_dmamap_sync(cq->ring->dma.tag,
2765 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2766 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2767 		num_cqes++;
2768 	}
2769 
2770 	if (num_cqes)
2771 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2772 
2773 	return 0;
2774 }
2775 
2776 
2777 static void
2778 setup_max_queues_want(POCE_SOFTC sc)
2779 {
	/* Check if it is a FLEX-10/UMC/VNIC function. If so, don't use RSS. */
2781 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2782 	    (sc->function_mode & FNM_UMC_MODE)    ||
2783 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2784 	    (!is_rss_enabled(sc))		  ||
2785 	    IS_BE2(sc)) {
2786 		sc->nrqs = 1;
2787 		sc->nwqs = 1;
2788 	} else {
2789 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2790 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2791 	}
2792 
2793 	if (IS_BE2(sc) && is_rss_enabled(sc))
2794 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2795 }
2796 
2797 
2798 static void
2799 update_queues_got(POCE_SOFTC sc)
2800 {
2801 	if (is_rss_enabled(sc)) {
2802 		sc->nrqs = sc->intr_count + 1;
2803 		sc->nwqs = sc->intr_count;
2804 	} else {
2805 		sc->nrqs = 1;
2806 		sc->nwqs = 1;
2807 	}
2808 
2809 	if (IS_BE2(sc))
2810 		sc->nwqs = 1;
2811 }
2812 
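/*
 * Returns TRUE when the frame is IPv6 and the next header is neither
 * TCP nor UDP, i.e. an extension header follows. Used by
 * oce_tx_asic_stall_verify() below to spot packets that can stall the
 * BE3 A1 ASIC when QnQ or UMC is in use.
 */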
2813 static int
2814 oce_check_ipv6_ext_hdr(struct mbuf *m)
2815 {
2816 	struct ether_header *eh = mtod(m, struct ether_header *);
2817 	caddr_t m_datatemp = m->m_data;
2818 
2819 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2820 		m->m_data += sizeof(struct ether_header);
2821 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2822 
		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
2830 				m->m_data = m_datatemp;
2831 				return TRUE;
2832 			}
2833 		}
2834 		m->m_data = m_datatemp;
2835 	}
2836 	return FALSE;
2837 }
2838 
2839 static int
2840 is_be3_a1(POCE_SOFTC sc)
2841 {
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2843 		return TRUE;
2844 	}
2845 	return FALSE;
2846 }
2847 
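/*
 * Software VLAN tag insertion for cases the hardware offload cannot
 * handle: the mbuf's tag (or the port PVID when untagged under UMC)
 * is encapsulated into the frame, and a QnQ outer tag is prepended
 * when one is configured. Clearing *complete signals the caller not
 * to ask the hardware to insert the tag as well.
 */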
2848 static struct mbuf *
2849 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2850 {
2851 	uint16_t vlan_tag = 0;
2852 
	if (!M_WRITABLE(m))
		return NULL;

	/* Embed the VLAN tag in the packet if it is not already part of it. */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* Under UMC, ignore the VLAN tag and insert the PVID instead. */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		if (complete)
			*complete = FALSE;
	}

	if (vlan_tag)
		m = ether_vlanencap(m, vlan_tag);

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		if (complete)
			*complete = FALSE;
	}
	return m;
2881 }
2882 
2883 static int
2884 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2885 {
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
	    oce_check_ipv6_ext_hdr(m)) {
2888 		return TRUE;
2889 	}
2890 	return FALSE;
2891 }
2892 
2893 static void
2894 oce_get_config(POCE_SOFTC sc)
2895 {
2896 	int rc = 0;
2897 	uint32_t max_rss = 0;
2898 
2899 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2900 		max_rss = OCE_LEGACY_MODE_RSS;
2901 	else
2902 		max_rss = OCE_MAX_RSS;
2903 
2904 	if (!IS_BE(sc)) {
2905 		rc = oce_get_profile_config(sc, max_rss);
2906 		if (rc) {
2907 			sc->nwqs = OCE_MAX_WQ;
2908 			sc->nrssqs = max_rss;
2909 			sc->nrqs = sc->nrssqs + 1;
2910 		}
2911 	}
2912 	else { /* For BE3 don't rely on fw for determining the resources */
2913 		sc->nrssqs = max_rss;
2914 		sc->nrqs = sc->nrssqs + 1;
2915 		sc->nwqs = OCE_MAX_WQ;
2916 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2917 	}
2918 }
2919 
2920 static void
2921 oce_rdma_close(void)
2922 {
2923   if (oce_rdma_if != NULL) {
2924     oce_rdma_if = NULL;
2925   }
2926 }
2927 
2928 static void
2929 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2930 {
2931   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2932 }
2933 
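/*
 * Entry point for the RoCE driver to attach to this NIC driver.
 * After validating the interface structure sizes, the callback table
 * is filled in and every known adapter is announced to the RDMA side
 * along with its doorbell mapping, interrupt vectors and MAC address.
 */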
2934 int
2935 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2936 {
2937   POCE_SOFTC sc;
2938   struct oce_dev_info di;
2939   int i;
2940 
2941   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2942     return -EINVAL;
2943   }
2944 
2945   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2946       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2947     return -ENXIO;
2948   }
2949 
2950   rdma_info->close = oce_rdma_close;
2951   rdma_info->mbox_post = oce_mbox_post;
2952   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2953   rdma_info->get_mac_addr = oce_get_mac_addr;
2954 
2955   oce_rdma_if = rdma_if;
2956 
2957   sc = softc_head;
2958   while (sc != NULL) {
2959     if (oce_rdma_if->announce != NULL) {
2960       memset(&di, 0, sizeof(di));
2961       di.dev = sc->dev;
2962       di.softc = sc;
2963       di.ifp = sc->ifp;
2964       di.db_bhandle = sc->db_bhandle;
2965       di.db_btag = sc->db_btag;
2966       di.db_page_size = 4096;
2967       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2968         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2969       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2970         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2971       } else {
2972         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2973       }
2974       di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
2975       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2976         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2977         di.msix.start_vector = sc->intr_count;
2978         for (i=0; i<di.msix.num_vectors; i++) {
2979           di.msix.vector_list[i] = sc->intrs[i].vector;
2980         }
      }
2983       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2984       di.vendor_id = pci_get_vendor(sc->dev);
2985       di.dev_id = pci_get_device(sc->dev);
2986 
2987       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2988           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2989       }
2990 
      rdma_if->announce(&di);
    }
    /* Advance unconditionally; leaving this inside the if above would
     * spin forever when the announce callback is NULL. */
    sc = sc->next;
  }
2995 
2996   return 0;
2997 }
2998 
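/*
 * Pick up optional tunables. Note that the getenv() lookups below
 * are commented out, so 'value' stays NULL and the compiled-in
 * defaults are kept unless the lookups are re-enabled.
 */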
2999 static void
3000 oce_read_env_variables( POCE_SOFTC sc )
3001 {
3002 	char *value = NULL;
3003 	int rc = 0;
3004 
	/* Read whether the user wants hardware or software LRO. */
	//value = getenv("oce_enable_hwlro");
	if (value && IS_SH(sc)) {
		sc->enable_hwlro = strtol(value, NULL, 10);
		if (sc->enable_hwlro) {
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if (rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			} else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		} else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	} else {
		sc->enable_hwlro = 0;
	}

	/* Read the receive mbuf (cluster) size. */
	//value = getenv("oce_rq_buf_size");
	if (value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		switch (oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;
		default:
			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
			oce_rq_buf_size = 2048;
		}
	}
3042 
3043 	return;
3044 }
3045