1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
49 #define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
122 
123 struct oce_common_cqe_info{
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 
136 /* Driver entry points prototypes */
137 static int  oce_probe(device_t dev);
138 static int  oce_attach(device_t dev);
139 static int  oce_detach(device_t dev);
140 static int  oce_shutdown(device_t dev);
141 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
142 static void oce_init(void *xsc);
143 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
144 static void oce_multiq_flush(struct ifnet *ifp);
145 
146 /* Driver interrupt routine prototypes */
147 static void oce_intr(void *arg, int pending);
148 static int  oce_setup_intr(POCE_SOFTC sc);
149 static int  oce_fast_isr(void *arg);
150 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
151 			  void (*isr) (void *arg, int pending));
152 
153 /* Media callbacks prototypes */
154 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
155 static int  oce_media_change(struct ifnet *ifp);
156 
157 /* Transmit routines prototypes */
158 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
159 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
160 static void oce_process_tx_completion(struct oce_wq *wq);
161 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
162 				 struct oce_wq *wq);
163 
164 /* Receive routines prototypes */
165 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
167 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
168 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static uint16_t oce_rq_handler_lro(void *arg);
170 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
172 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
173 
174 /* Helper function prototypes in this file */
175 static int  oce_attach_ifp(POCE_SOFTC sc);
176 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
178 static int  oce_vid_config(POCE_SOFTC sc);
179 static void oce_mac_addr_set(POCE_SOFTC sc);
180 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
181 static void oce_local_timer(void *arg);
182 static void oce_if_deactivate(POCE_SOFTC sc);
183 static void oce_if_activate(POCE_SOFTC sc);
184 static void setup_max_queues_want(POCE_SOFTC sc);
185 static void update_queues_got(POCE_SOFTC sc);
186 static void process_link_state(POCE_SOFTC sc,
187 		 struct oce_async_cqe_link_state *acqe);
188 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
189 static void oce_get_config(POCE_SOFTC sc);
190 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
191 static void oce_read_env_variables(POCE_SOFTC sc);
192 
193 
194 /* IP specific */
195 #if defined(INET6) || defined(INET)
196 static int  oce_init_lro(POCE_SOFTC sc);
197 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
198 #endif
199 
200 static device_method_t oce_dispatch[] = {
201 	DEVMETHOD(device_probe, oce_probe),
202 	DEVMETHOD(device_attach, oce_attach),
203 	DEVMETHOD(device_detach, oce_detach),
204 	DEVMETHOD(device_shutdown, oce_shutdown),
205 
206 	DEVMETHOD_END
207 };
208 
209 static driver_t oce_driver = {
210 	"oce",
211 	oce_dispatch,
212 	sizeof(OCE_SOFTC)
213 };
214 static devclass_t oce_devclass;
215 
216 
217 /* global vars */
218 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
219 
220 /* Module capabilities and parameters */
221 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
222 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
223 uint32_t oce_rq_buf_size = 2048;
224 
225 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
226 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
227 
228 
229 /* Supported devices table */
230 static uint32_t supportedDevices[] =  {
231 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
232 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
233 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
234 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
235 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
236 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
237 };
238 
239 
240 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
241 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
242     nitems(supportedDevices));
243 MODULE_DEPEND(oce, pci, 1, 1, 1);
244 MODULE_DEPEND(oce, ether, 1, 1, 1);
245 MODULE_VERSION(oce, 1);
246 
247 
248 POCE_SOFTC softc_head = NULL;
249 POCE_SOFTC softc_tail = NULL;
250 
251 struct oce_rdma_if *oce_rdma_if = NULL;
252 
253 /*****************************************************************************
254  *			Driver entry point functions                         *
255  *****************************************************************************/
256 
257 static int
258 oce_probe(device_t dev)
259 {
260 	uint16_t vendor = 0;
261 	uint16_t device = 0;
262 	int i = 0;
263 	char str[256] = {0};
264 	POCE_SOFTC sc;
265 
266 	sc = device_get_softc(dev);
267 	bzero(sc, sizeof(OCE_SOFTC));
268 	sc->dev = dev;
269 
270 	vendor = pci_get_vendor(dev);
271 	device = pci_get_device(dev);
272 
273 	for (i = 0; i < nitems(supportedDevices); i++) {
274 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
275 			if (device == (supportedDevices[i] & 0xffff)) {
276 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
277 					component_revision);
278 				device_set_desc_copy(dev, str);
279 
280 				switch (device) {
281 				case PCI_PRODUCT_BE2:
282 					sc->flags |= OCE_FLAGS_BE2;
283 					break;
284 				case PCI_PRODUCT_BE3:
285 					sc->flags |= OCE_FLAGS_BE3;
286 					break;
287 				case PCI_PRODUCT_XE201:
288 				case PCI_PRODUCT_XE201_VF:
289 					sc->flags |= OCE_FLAGS_XE201;
290 					break;
291 				case PCI_PRODUCT_SH:
292 					sc->flags |= OCE_FLAGS_SH;
293 					break;
294 				default:
295 					return ENXIO;
296 				}
297 				return BUS_PROBE_DEFAULT;
298 			}
299 		}
300 	}
301 
302 	return ENXIO;
303 }
304 
305 
306 static int
307 oce_attach(device_t dev)
308 {
309 	POCE_SOFTC sc;
310 	int rc = 0;
311 
312 	sc = device_get_softc(dev);
313 
314 	rc = oce_hw_pci_alloc(sc);
315 	if (rc)
316 		return rc;
317 
318 	sc->tx_ring_size = OCE_TX_RING_SIZE;
319 	sc->rx_ring_size = OCE_RX_RING_SIZE;
320 	/* receive fragment size should be a multiple of 2K */
321 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
322 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
323 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
324 
325 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
326 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
327 
328 	/* initialise the hardware */
329 	rc = oce_hw_init(sc);
330 	if (rc)
331 		goto pci_res_free;
332 
333 	oce_read_env_variables(sc);
334 
335 	oce_get_config(sc);
336 
337 	setup_max_queues_want(sc);
338 
339 	rc = oce_setup_intr(sc);
340 	if (rc)
341 		goto mbox_free;
342 
343 	rc = oce_queue_init_all(sc);
344 	if (rc)
345 		goto intr_free;
346 
347 	rc = oce_attach_ifp(sc);
348 	if (rc)
349 		goto queues_free;
350 
351 #if defined(INET6) || defined(INET)
352 	rc = oce_init_lro(sc);
353 	if (rc)
354 		goto ifp_free;
355 #endif
356 
357 	rc = oce_hw_start(sc);
358 	if (rc)
359 		goto lro_free;
360 
361 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
362 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
363 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
364 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
365 
366 	rc = oce_stats_init(sc);
367 	if (rc)
368 		goto vlan_free;
369 
370 	oce_add_sysctls(sc);
371 
372 	callout_init(&sc->timer, CALLOUT_MPSAFE);
373 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
374 	if (rc)
375 		goto stats_free;
376 
377 	sc->next = NULL;
378 	if (softc_tail != NULL) {
379 	  softc_tail->next = sc;
380 	} else {
381 	  softc_head = sc;
382 	}
383 	softc_tail = sc;
384 
385 	return 0;
386 
387 stats_free:
388 	callout_drain(&sc->timer);
389 	oce_stats_free(sc);
390 vlan_free:
391 	if (sc->vlan_attach)
392 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
393 	if (sc->vlan_detach)
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
395 	oce_hw_intr_disable(sc);
396 lro_free:
397 #if defined(INET6) || defined(INET)
398 	oce_free_lro(sc);
399 ifp_free:
400 #endif
401 	ether_ifdetach(sc->ifp);
402 	if_free(sc->ifp);
403 queues_free:
404 	oce_queue_release_all(sc);
405 intr_free:
406 	oce_intr_free(sc);
407 mbox_free:
408 	oce_dma_free(sc, &sc->bsmbx);
409 pci_res_free:
410 	oce_hw_pci_free(sc);
411 	LOCK_DESTROY(&sc->dev_lock);
412 	LOCK_DESTROY(&sc->bmbx_lock);
413 	return rc;
414 
415 }
416 
417 
418 static int
419 oce_detach(device_t dev)
420 {
421 	POCE_SOFTC sc = device_get_softc(dev);
422 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
423 
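        /* Unlink this adapter from the global softc list; if it was the
         * last entry, move softc_tail back to the previous node. */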
424         poce_sc_tmp = softc_head;
425         ppoce_sc_tmp1 = &softc_head;
426         while (poce_sc_tmp != NULL) {
427           if (poce_sc_tmp == sc) {
428             *ppoce_sc_tmp1 = sc->next;
429             if (sc->next == NULL) {
430               softc_tail = poce_sc_tmp2;
431             }
432             break;
433           }
434           poce_sc_tmp2 = poce_sc_tmp;
435           ppoce_sc_tmp1 = &poce_sc_tmp->next;
436           poce_sc_tmp = poce_sc_tmp->next;
437         }
438 
439 	LOCK(&sc->dev_lock);
440 	oce_if_deactivate(sc);
441 	UNLOCK(&sc->dev_lock);
442 
443 	callout_drain(&sc->timer);
444 
445 	if (sc->vlan_attach != NULL)
446 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
447 	if (sc->vlan_detach != NULL)
448 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
449 
450 	ether_ifdetach(sc->ifp);
451 
452 	if_free(sc->ifp);
453 
454 	oce_hw_shutdown(sc);
455 
456 	bus_generic_detach(dev);
457 
458 	return 0;
459 }
460 
461 
462 static int
463 oce_shutdown(device_t dev)
464 {
465 	int rc;
466 
467 	rc = oce_detach(dev);
468 
469 	return rc;
470 }
471 
472 
473 static int
474 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
475 {
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	POCE_SOFTC sc = ifp->if_softc;
478 	struct ifi2creq i2c;
479 	uint8_t	offset = 0;
480 	int rc = 0;
481 	uint32_t u;
482 
483 	switch (command) {
484 
485 	case SIOCGIFMEDIA:
486 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
487 		break;
488 
489 	case SIOCSIFMTU:
490 		if (ifr->ifr_mtu > OCE_MAX_MTU)
491 			rc = EINVAL;
492 		else
493 			ifp->if_mtu = ifr->ifr_mtu;
494 		break;
495 
496 	case SIOCSIFFLAGS:
497 		if (ifp->if_flags & IFF_UP) {
498 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
499 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
500 				oce_init(sc);
501 			}
502 			device_printf(sc->dev, "Interface Up\n");
503 		} else {
504 			LOCK(&sc->dev_lock);
505 
506 			sc->ifp->if_drv_flags &=
507 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
508 			oce_if_deactivate(sc);
509 
510 			UNLOCK(&sc->dev_lock);
511 
512 			device_printf(sc->dev, "Interface Down\n");
513 		}
514 
515 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
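			/* bit 0 requests MAC promiscuous and bit 1 VLAN
			 * promiscuous mode in the mailbox request (assumed
			 * from the oce_rxf_set_promiscuous flag layout) */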
516 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
517 				sc->promisc = TRUE;
518 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
519 			if (!oce_rxf_set_promiscuous(sc, 0))
520 				sc->promisc = FALSE;
521 		}
522 
523 		break;
524 
525 	case SIOCADDMULTI:
526 	case SIOCDELMULTI:
527 		rc = oce_hw_update_multicast(sc);
528 		if (rc)
529 			device_printf(sc->dev,
530 				"Update multicast address failed\n");
531 		break;
532 
533 	case SIOCSIFCAP:
534 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
535 
536 		if (u & IFCAP_TXCSUM) {
537 			ifp->if_capenable ^= IFCAP_TXCSUM;
538 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
539 
540 			if (IFCAP_TSO & ifp->if_capenable &&
541 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
542 				ifp->if_capenable &= ~IFCAP_TSO;
543 				ifp->if_hwassist &= ~CSUM_TSO;
544 				if_printf(ifp,
545 					 "TSO disabled due to -txcsum.\n");
546 			}
547 		}
548 
549 		if (u & IFCAP_RXCSUM)
550 			ifp->if_capenable ^= IFCAP_RXCSUM;
551 
552 		if (u & IFCAP_TSO4) {
553 			ifp->if_capenable ^= IFCAP_TSO4;
554 
555 			if (IFCAP_TSO & ifp->if_capenable) {
556 				if (IFCAP_TXCSUM & ifp->if_capenable)
557 					ifp->if_hwassist |= CSUM_TSO;
558 				else {
559 					ifp->if_capenable &= ~IFCAP_TSO;
560 					ifp->if_hwassist &= ~CSUM_TSO;
561 					if_printf(ifp,
562 					    "Enable txcsum first.\n");
563 					rc = EAGAIN;
564 				}
565 			} else
566 				ifp->if_hwassist &= ~CSUM_TSO;
567 		}
568 
569 		if (u & IFCAP_VLAN_HWTAGGING)
570 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
571 
572 		if (u & IFCAP_VLAN_HWFILTER) {
573 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
574 			oce_vid_config(sc);
575 		}
576 #if defined(INET6) || defined(INET)
577 		if (u & IFCAP_LRO) {
578 			ifp->if_capenable ^= IFCAP_LRO;
579 			if(sc->enable_hwlro) {
580 				if(ifp->if_capenable & IFCAP_LRO) {
581 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
582 				}else {
583 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
584 				}
585 			}
586 		}
587 #endif
588 
589 		break;
590 
591 	case SIOCGI2C:
592 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
593 		if (rc)
594 			break;
595 
596 		if (i2c.dev_addr == PAGE_NUM_A0) {
597 			offset = i2c.offset;
598 		} else if (i2c.dev_addr == PAGE_NUM_A2) {
599 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
600 		} else {
601 			rc = EINVAL;
602 			break;
603 		}
604 
605 		if (i2c.len > sizeof(i2c.data) ||
606 		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
607 			rc = EINVAL;
608 			break;
609 		}
610 
611 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
612 		if (rc) {
613 			rc = -rc;
614 			break;
615 		}
616 
617 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
618 
619 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
620 		break;
621 
622 	case SIOCGPRIVATE_0:
623 		rc = priv_check(curthread, PRIV_DRIVER);
624 		if (rc != 0)
625 			break;
626 		rc = oce_handle_passthrough(ifp, data);
627 		break;
628 	default:
629 		rc = ether_ioctl(ifp, command, data);
630 		break;
631 	}
632 
633 	return rc;
634 }
635 
636 
637 static void
638 oce_init(void *arg)
639 {
640 	POCE_SOFTC sc = arg;
641 
642 	LOCK(&sc->dev_lock);
643 
644 	if (sc->ifp->if_flags & IFF_UP) {
645 		oce_if_deactivate(sc);
646 		oce_if_activate(sc);
647 	}
648 
649 	UNLOCK(&sc->dev_lock);
650 
651 }
652 
653 
654 static int
655 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
656 {
657 	POCE_SOFTC sc = ifp->if_softc;
658 	struct oce_wq *wq = NULL;
659 	int queue_index = 0;
660 	int status = 0;
661 
662 	if (!sc->link_status)
663 		return ENXIO;
664 
665 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
666 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
667 
668 	wq = sc->wq[queue_index];
669 
670 	LOCK(&wq->tx_lock);
671 	status = oce_multiq_transmit(ifp, m, wq);
672 	UNLOCK(&wq->tx_lock);
673 
674 	return status;
675 
676 }
677 
678 
679 static void
680 oce_multiq_flush(struct ifnet *ifp)
681 {
682 	POCE_SOFTC sc = ifp->if_softc;
683 	struct mbuf     *m;
684 	int i = 0;
685 
686 	for (i = 0; i < sc->nwqs; i++) {
687 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
688 			m_freem(m);
689 	}
690 	if_qflush(ifp);
691 }
692 
693 
694 
695 /*****************************************************************************
696  *                   Driver interrupt routine functions                      *
697  *****************************************************************************/
698 
699 static void
700 oce_intr(void *arg, int pending)
701 {
702 
703 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
704 	POCE_SOFTC sc = ii->sc;
705 	struct oce_eq *eq = ii->eq;
706 	struct oce_eqe *eqe;
707 	struct oce_cq *cq = NULL;
708 	int i, num_eqes = 0;
709 
710 
711 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
712 				 BUS_DMASYNC_POSTWRITE);
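	/* Consume every posted event-queue entry; the count is credited
	 * back to the EQ in a single doorbell write below. */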
713 	do {
714 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
715 		if (eqe->evnt == 0)
716 			break;
717 		eqe->evnt = 0;
718 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
719 					BUS_DMASYNC_POSTWRITE);
720 		RING_GET(eq->ring, 1);
721 		num_eqes++;
722 
723 	} while (TRUE);
724 
725 	if (!num_eqes)
726 		goto eq_arm; /* Spurious */
727 
728 	/* Clear EQ entries, but don't arm */
729 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
730 
731 	/* Process TX, RX and MCC, but don't arm the CQs */
732 	for (i = 0; i < eq->cq_valid; i++) {
733 		cq = eq->cq[i];
734 		(*cq->cq_handler)(cq->cb_arg);
735 	}
736 
737 	/* Arm all cqs connected to this EQ */
738 	for (i = 0; i < eq->cq_valid; i++) {
739 		cq = eq->cq[i];
740 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
741 	}
742 
743 eq_arm:
744 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
745 
746 	return;
747 }
748 
749 
750 static int
751 oce_setup_intr(POCE_SOFTC sc)
752 {
753 	int rc = 0, use_intx = 0;
754 	int vector = 0, req_vectors = 0;
755 	int tot_req_vectors, tot_vectors;
756 
757 	if (is_rss_enabled(sc))
758 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
759 	else
760 		req_vectors = 1;
761 
762 	tot_req_vectors = req_vectors;
763 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
764 	  if (req_vectors > 1) {
765 	    tot_req_vectors += OCE_RDMA_VECTORS;
766 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
767 	  }
768 	}
769 
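        /* The NIC and the RoCE engine share the MSI-X allocation; if fewer
         * vectors are granted than requested, RoCE is given up to half of
         * them and the NIC keeps the rest. */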
770         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
771 		sc->intr_count = req_vectors;
772                 tot_vectors = tot_req_vectors;
773 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
774 		if (rc != 0) {
775 			use_intx = 1;
776 			pci_release_msi(sc->dev);
777 		} else {
778 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
779 		    if (tot_vectors < tot_req_vectors) {
780 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
781 			sc->roce_intr_count = (tot_vectors / 2);
782 		      }
783 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
784 		    }
785 		  } else {
786 		    sc->intr_count = tot_vectors;
787 		  }
788     		  sc->flags |= OCE_FLAGS_USING_MSIX;
789 		}
790 	} else
791 		use_intx = 1;
792 
793 	if (use_intx)
794 		sc->intr_count = 1;
795 
796 	/* Scale number of queues based on the interrupt vectors we got */
797 	update_queues_got(sc);
798 
799 	if (use_intx) {
800 		device_printf(sc->dev, "Using legacy interrupt\n");
801 		rc = oce_alloc_intr(sc, vector, oce_intr);
802 		if (rc)
803 			goto error;
804 	} else {
805 		for (; vector < sc->intr_count; vector++) {
806 			rc = oce_alloc_intr(sc, vector, oce_intr);
807 			if (rc)
808 				goto error;
809 		}
810 	}
811 
812 	return 0;
813 error:
814 	oce_intr_free(sc);
815 	return rc;
816 }
817 
818 
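/* Fast interrupt filter: acknowledge the EQ interrupt without re-arming it
 * and defer all completion processing to this vector's taskqueue. */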
819 static int
820 oce_fast_isr(void *arg)
821 {
822 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
823 	POCE_SOFTC sc = ii->sc;
824 
825 	if (ii->eq == NULL)
826 		return FILTER_STRAY;
827 
828 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
829 
830 	taskqueue_enqueue(ii->tq, &ii->task);
831 
832  	ii->eq->intr++;
833 
834 	return FILTER_HANDLED;
835 }
836 
837 
838 static int
839 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
840 {
841 	POCE_INTR_INFO ii;
842 	int rc = 0, rr;
843 
844 	if (vector >= OCE_MAX_EQ)
845 		return (EINVAL);
846 
847 	ii = &sc->intrs[vector];
848 
849 	/* Set the resource id for the interrupt.
850 	 * MSIx is vector + 1 for the resource id,
851 	 * INTx is 0 for the resource id.
852 	 */
853 	if (sc->flags & OCE_FLAGS_USING_MSIX)
854 		rr = vector + 1;
855 	else
856 		rr = 0;
857 	ii->intr_res = bus_alloc_resource_any(sc->dev,
858 					      SYS_RES_IRQ,
859 					      &rr, RF_ACTIVE|RF_SHAREABLE);
860 	ii->irq_rr = rr;
861 	if (ii->intr_res == NULL) {
862 		device_printf(sc->dev,
863 			  "Could not allocate interrupt\n");
864 		rc = ENXIO;
865 		return rc;
866 	}
867 
868 	TASK_INIT(&ii->task, 0, isr, ii);
869 	ii->vector = vector;
870 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
871 	ii->tq = taskqueue_create_fast(ii->task_name,
872 			M_NOWAIT,
873 			taskqueue_thread_enqueue,
874 			&ii->tq);
875 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
876 			device_get_nameunit(sc->dev));
877 
878 	ii->sc = sc;
879 	rc = bus_setup_intr(sc->dev,
880 			ii->intr_res,
881 			INTR_TYPE_NET,
882 			oce_fast_isr, NULL, ii, &ii->tag);
883 	return rc;
884 
885 }
886 
887 
888 void
889 oce_intr_free(POCE_SOFTC sc)
890 {
891 	int i = 0;
892 
893 	for (i = 0; i < sc->intr_count; i++) {
894 
895 		if (sc->intrs[i].tag != NULL)
896 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
897 						sc->intrs[i].tag);
898 		if (sc->intrs[i].tq != NULL)
899 			taskqueue_free(sc->intrs[i].tq);
900 
901 		if (sc->intrs[i].intr_res != NULL)
902 			bus_release_resource(sc->dev, SYS_RES_IRQ,
903 						sc->intrs[i].irq_rr,
904 						sc->intrs[i].intr_res);
905 		sc->intrs[i].tag = NULL;
906 		sc->intrs[i].intr_res = NULL;
907 	}
908 
909 	if (sc->flags & OCE_FLAGS_USING_MSIX)
910 		pci_release_msi(sc->dev);
911 
912 }
913 
914 
915 
916 /******************************************************************************
917 *			  Media callback functions			      *
918 ******************************************************************************/
919 
920 static void
921 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
922 {
923 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
924 
925 
926 	req->ifm_status = IFM_AVALID;
927 	req->ifm_active = IFM_ETHER;
928 
929 	if (sc->link_status == 1)
930 		req->ifm_status |= IFM_ACTIVE;
931 	else
932 		return;
933 
934 	switch (sc->link_speed) {
935 	case 1: /* 10 Mbps */
936 		req->ifm_active |= IFM_10_T | IFM_FDX;
937 		sc->speed = 10;
938 		break;
939 	case 2: /* 100 Mbps */
940 		req->ifm_active |= IFM_100_TX | IFM_FDX;
941 		sc->speed = 100;
942 		break;
943 	case 3: /* 1 Gbps */
944 		req->ifm_active |= IFM_1000_T | IFM_FDX;
945 		sc->speed = 1000;
946 		break;
947 	case 4: /* 10 Gbps */
948 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
949 		sc->speed = 10000;
950 		break;
951 	case 5: /* 20 Gbps */
952 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
953 		sc->speed = 20000;
954 		break;
955 	case 6: /* 25 Gbps */
956 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
957 		sc->speed = 25000;
958 		break;
959 	case 7: /* 40 Gbps */
960 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
961 		sc->speed = 40000;
962 		break;
963 	default:
964 		sc->speed = 0;
965 		break;
966 	}
967 
968 	return;
969 }
970 
971 
972 int
973 oce_media_change(struct ifnet *ifp)
974 {
975 	return 0;
976 }
977 
978 
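/* Decide whether a copy of this transmit packet should also be sent to the
 * BMC (OS-to-BMC). A packet qualifies if it matches one of the configured
 * filters: multicast/broadcast/ARP frames, ICMPv6 router or neighbor
 * advertisements, or DHCP/NetBIOS/DHCPv6-RAS UDP ports. Qualifying packets
 * are duplicated with m_dup(). */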
979 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
980 				struct mbuf *m, boolean_t *os2bmc,
981 				struct mbuf **m_new)
982 {
983 	struct ether_header *eh = NULL;
984 
985 	eh = mtod(m, struct ether_header *);
986 
987 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
988 		*os2bmc = FALSE;
989 		goto done;
990 	}
991 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
992 		goto done;
993 
994 	if (is_mc_allowed_on_bmc(sc, eh) ||
995 	    is_bc_allowed_on_bmc(sc, eh) ||
996 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
997 		*os2bmc = TRUE;
998 		goto done;
999 	}
1000 
1001 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
1002 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1003 		uint8_t nexthdr = ip6->ip6_nxt;
1004 		if (nexthdr == IPPROTO_ICMPV6) {
1005 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
1006 			switch (icmp6->icmp6_type) {
1007 			case ND_ROUTER_ADVERT:
1008 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
1009 				goto done;
1010 			case ND_NEIGHBOR_ADVERT:
1011 				*os2bmc = is_ipv6_na_filt_enabled(sc);
1012 				goto done;
1013 			default:
1014 				break;
1015 			}
1016 		}
1017 	}
1018 
1019 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
1020 		struct ip *ip = mtod(m, struct ip *);
1021 		int iphlen = ip->ip_hl << 2;
1022 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
1023 		switch (uh->uh_dport) {
1024 		case DHCP_CLIENT_PORT:
1025 			*os2bmc = is_dhcp_client_filt_enabled(sc);
1026 			goto done;
1027 		case DHCP_SERVER_PORT:
1028 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1029 			goto done;
1030 		case NET_BIOS_PORT1:
1031 		case NET_BIOS_PORT2:
1032 			*os2bmc = is_nbios_filt_enabled(sc);
1033 			goto done;
1034 		case DHCPV6_RAS_PORT:
1035 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1036 			goto done;
1037 		default:
1038 			break;
1039 		}
1040 	}
1041 done:
1042 	if (*os2bmc) {
1043 		*m_new = m_dup(m, M_NOWAIT);
1044 		if (!*m_new) {
1045 			*os2bmc = FALSE;
1046 			return;
1047 		}
1048 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1049 	}
1050 }
1051 
1052 
1053 
1054 /*****************************************************************************
1055  *			  Transmit routine functions			     *
1056  *****************************************************************************/
1057 
1058 static int
1059 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1060 {
1061 	int rc = 0, i, retry_cnt = 0;
1062 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1063 	struct mbuf *m, *m_temp, *m_new = NULL;
1064 	struct oce_wq *wq = sc->wq[wq_index];
1065 	struct oce_packet_desc *pd;
1066 	struct oce_nic_hdr_wqe *nichdr;
1067 	struct oce_nic_frag_wqe *nicfrag;
1068 	struct ether_header *eh = NULL;
1069 	int num_wqes;
1070 	uint32_t reg_value;
1071 	boolean_t complete = TRUE;
1072 	boolean_t os2bmc = FALSE;
1073 
1074 	m = *mpp;
1075 	if (!m)
1076 		return EINVAL;
1077 
1078 	if (!(m->m_flags & M_PKTHDR)) {
1079 		rc = ENXIO;
1080 		goto free_ret;
1081 	}
1082 
1083 	/* Don't allow non-TSO packets longer than MTU */
1084 	if (!is_tso_pkt(m)) {
1085 		eh = mtod(m, struct ether_header *);
1086 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1087 			 goto free_ret;
1088 	}
1089 
1090 	if(oce_tx_asic_stall_verify(sc, m)) {
1091 		m = oce_insert_vlan_tag(sc, m, &complete);
1092 		if(!m) {
1093 			device_printf(sc->dev, "Insertion unsuccessful\n");
1094 			return 0;
1095 		}
1096 
1097 	}
1098 
1099 	/* The Lancer and SH ASICs have a bug whereby packets of 32 bytes or
1100 	 * less may cause a transmit stall on that port. The work-around is
1101 	 * to pad such short packets (<= 32 bytes) to a 36-byte length.
1102 	 */
1103 	if(IS_SH(sc) || IS_XE201(sc) ) {
1104 		if(m->m_pkthdr.len <= 32) {
1105 			char buf[36];
1106 			bzero((void *)buf, 36);
1107 			m_append(m, (36 - m->m_pkthdr.len), buf);
1108 		}
1109 	}
1110 
1111 tx_start:
1112 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1113 		/* consolidate packet buffers for TSO/LSO segment offload */
1114 #if defined(INET6) || defined(INET)
1115 		m = oce_tso_setup(sc, mpp);
1116 #else
1117 		m = NULL;
1118 #endif
1119 		if (m == NULL) {
1120 			rc = ENXIO;
1121 			goto free_ret;
1122 		}
1123 	}
1124 
1125 
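	/* Map the frame for DMA: a successful load consumes one header WQE
	 * plus one fragment WQE per DMA segment, with an extra dummy WQE on
	 * BE/SH ASICs to keep the WQE count even. */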
1126 	pd = &wq->pckts[wq->pkt_desc_head];
1127 
1128 retry:
1129 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1130 				     pd->map,
1131 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1132 	if (rc == 0) {
1133 		num_wqes = pd->nsegs + 1;
1134 		if (IS_BE(sc) || IS_SH(sc)) {
1135 			/* Dummy WQE required only for BE3. */
1136 			if (num_wqes & 1)
1137 				num_wqes++;
1138 		}
1139 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1140 			bus_dmamap_unload(wq->tag, pd->map);
1141 			return EBUSY;
1142 		}
1143 		atomic_store_rel_int(&wq->pkt_desc_head,
1144 				     (wq->pkt_desc_head + 1) % \
1145 				      OCE_WQ_PACKET_ARRAY_SIZE);
1146 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1147 		pd->mbuf = m;
1148 
1149 		nichdr =
1150 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1151 		nichdr->u0.dw[0] = 0;
1152 		nichdr->u0.dw[1] = 0;
1153 		nichdr->u0.dw[2] = 0;
1154 		nichdr->u0.dw[3] = 0;
1155 
1156 		nichdr->u0.s.complete = complete;
1157 		nichdr->u0.s.mgmt = os2bmc;
1158 		nichdr->u0.s.event = 1;
1159 		nichdr->u0.s.crc = 1;
1160 		nichdr->u0.s.forward = 0;
1161 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1162 		nichdr->u0.s.udpcs =
1163 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1164 		nichdr->u0.s.tcpcs =
1165 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1166 		nichdr->u0.s.num_wqe = num_wqes;
1167 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1168 
1169 		if (m->m_flags & M_VLANTAG) {
1170 			nichdr->u0.s.vlan = 1; /*Vlan present*/
1171 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1172 		}
1173 
1174 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1175 			if (m->m_pkthdr.tso_segsz) {
1176 				nichdr->u0.s.lso = 1;
1177 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1178 			}
1179 			if (!IS_BE(sc) || !IS_SH(sc))
1180 				nichdr->u0.s.ipcs = 1;
1181 		}
1182 
1183 		RING_PUT(wq->ring, 1);
1184 		atomic_add_int(&wq->ring->num_used, 1);
1185 
1186 		for (i = 0; i < pd->nsegs; i++) {
1187 			nicfrag =
1188 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1189 						      struct oce_nic_frag_wqe);
1190 			nicfrag->u0.s.rsvd0 = 0;
1191 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1192 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1193 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1194 			pd->wqe_idx = wq->ring->pidx;
1195 			RING_PUT(wq->ring, 1);
1196 			atomic_add_int(&wq->ring->num_used, 1);
1197 		}
1198 		if (num_wqes > (pd->nsegs + 1)) {
1199 			nicfrag =
1200 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1201 						      struct oce_nic_frag_wqe);
1202 			nicfrag->u0.dw[0] = 0;
1203 			nicfrag->u0.dw[1] = 0;
1204 			nicfrag->u0.dw[2] = 0;
1205 			nicfrag->u0.dw[3] = 0;
1206 			pd->wqe_idx = wq->ring->pidx;
1207 			RING_PUT(wq->ring, 1);
1208 			atomic_add_int(&wq->ring->num_used, 1);
1209 			pd->nsegs++;
1210 		}
1211 
1212 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1213 		wq->tx_stats.tx_reqs++;
1214 		wq->tx_stats.tx_wrbs += num_wqes;
1215 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1216 		wq->tx_stats.tx_pkts++;
1217 
1218 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1219 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1220 		reg_value = (num_wqes << 16) | wq->wq_id;
1221 
1222 		/* if os2bmc is not enabled or if the pkt is already tagged as
1223 		   bmc, do nothing
1224 		 */
1225 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1226 
1227 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1228 
1229 	} else if (rc == EFBIG)	{
1230 		if (retry_cnt == 0) {
1231 			m_temp = m_defrag(m, M_NOWAIT);
1232 			if (m_temp == NULL)
1233 				goto free_ret;
1234 			m = m_temp;
1235 			*mpp = m_temp;
1236 			retry_cnt = retry_cnt + 1;
1237 			goto retry;
1238 		} else
1239 			goto free_ret;
1240 	} else if (rc == ENOMEM)
1241 		return rc;
1242 	else
1243 		goto free_ret;
1244 
1245 	if (os2bmc) {
1246 		m = m_new;
1247 		goto tx_start;
1248 	}
1249 
1250 	return 0;
1251 
1252 free_ret:
1253 	m_freem(*mpp);
1254 	*mpp = NULL;
1255 	return rc;
1256 }
1257 
1258 
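/* Reclaim the packet descriptor for one completed transmit request: unload
 * its DMA map, free the mbuf, and restart the queue once at least half of
 * the ring has drained. */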
1259 static void
1260 oce_process_tx_completion(struct oce_wq *wq)
1261 {
1262 	struct oce_packet_desc *pd;
1263 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1264 	struct mbuf *m;
1265 
1266 	pd = &wq->pckts[wq->pkt_desc_tail];
1267 	atomic_store_rel_int(&wq->pkt_desc_tail,
1268 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1269 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1270 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1271 	bus_dmamap_unload(wq->tag, pd->map);
1272 
1273 	m = pd->mbuf;
1274 	m_freem(m);
1275 	pd->mbuf = NULL;
1276 
1277 
1278 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1279 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1280 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1281 			oce_tx_restart(sc, wq);
1282 		}
1283 	}
1284 }
1285 
1286 
1287 static void
1288 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1289 {
1290 
1291 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1292 		return;
1293 
1294 #if __FreeBSD_version >= 800000
1295 	if (!drbr_empty(sc->ifp, wq->br))
1296 #else
1297 	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1298 #endif
1299 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1300 
1301 }
1302 
1303 
1304 #if defined(INET6) || defined(INET)
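/* Make sure the Ethernet/VLAN, IP and TCP headers of a TSO frame are
 * writable and contiguous in the first mbuf so the header WQE can be built
 * from them; returns NULL for non-TCP frames or on allocation failure. */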
1305 static struct mbuf *
1306 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1307 {
1308 	struct mbuf *m;
1309 #ifdef INET
1310 	struct ip *ip;
1311 #endif
1312 #ifdef INET6
1313 	struct ip6_hdr *ip6;
1314 #endif
1315 	struct ether_vlan_header *eh;
1316 	struct tcphdr *th;
1317 	uint16_t etype;
1318 	int total_len = 0, ehdrlen = 0;
1319 
1320 	m = *mpp;
1321 
1322 	if (M_WRITABLE(m) == 0) {
1323 		m = m_dup(*mpp, M_NOWAIT);
1324 		if (!m)
1325 			return NULL;
1326 		m_freem(*mpp);
1327 		*mpp = m;
1328 	}
1329 
1330 	eh = mtod(m, struct ether_vlan_header *);
1331 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1332 		etype = ntohs(eh->evl_proto);
1333 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1334 	} else {
1335 		etype = ntohs(eh->evl_encap_proto);
1336 		ehdrlen = ETHER_HDR_LEN;
1337 	}
1338 
1339 	switch (etype) {
1340 #ifdef INET
1341 	case ETHERTYPE_IP:
1342 		ip = (struct ip *)(m->m_data + ehdrlen);
1343 		if (ip->ip_p != IPPROTO_TCP)
1344 			return NULL;
1345 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1346 
1347 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1348 		break;
1349 #endif
1350 #ifdef INET6
1351 	case ETHERTYPE_IPV6:
1352 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1353 		if (ip6->ip6_nxt != IPPROTO_TCP)
1354 			return NULL;
1355 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1356 
1357 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1358 		break;
1359 #endif
1360 	default:
1361 		return NULL;
1362 	}
1363 
1364 	m = m_pullup(m, total_len);
1365 	if (!m)
1366 		return NULL;
1367 	*mpp = m;
1368 	return m;
1369 
1370 }
1371 #endif /* INET6 || INET */
1372 
1373 void
1374 oce_tx_task(void *arg, int npending)
1375 {
1376 	struct oce_wq *wq = arg;
1377 	POCE_SOFTC sc = wq->parent;
1378 	struct ifnet *ifp = sc->ifp;
1379 	int rc = 0;
1380 
1381 #if __FreeBSD_version >= 800000
1382 	LOCK(&wq->tx_lock);
1383 	rc = oce_multiq_transmit(ifp, NULL, wq);
1384 	if (rc) {
1385 		device_printf(sc->dev,
1386 				"TX[%d] restart failed\n", wq->queue_index);
1387 	}
1388 	UNLOCK(&wq->tx_lock);
1389 #else
1390 	oce_start(ifp);
1391 #endif
1392 
1393 }
1394 
1395 
1396 void
1397 oce_start(struct ifnet *ifp)
1398 {
1399 	POCE_SOFTC sc = ifp->if_softc;
1400 	struct mbuf *m;
1401 	int rc = 0;
1402 	int def_q = 0; /* Default tx queue is 0 */
1403 
1404 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1405 			IFF_DRV_RUNNING)
1406 		return;
1407 
1408 	if (!sc->link_status)
1409 		return;
1410 
1411 	do {
1412 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1413 		if (m == NULL)
1414 			break;
1415 
1416 		LOCK(&sc->wq[def_q]->tx_lock);
1417 		rc = oce_tx(sc, &m, def_q);
1418 		UNLOCK(&sc->wq[def_q]->tx_lock);
1419 		if (rc) {
1420 			if (m != NULL) {
1421 				sc->wq[def_q]->tx_stats.tx_stops ++;
1422 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1423 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1424 				m = NULL;
1425 			}
1426 			break;
1427 		}
1428 		if (m != NULL)
1429 			ETHER_BPF_MTAP(ifp, m);
1430 
1431 	} while (TRUE);
1432 
1433 	return;
1434 }
1435 
1436 
1437 /* Handle the Completion Queue for transmit */
1438 uint16_t
1439 oce_wq_handler(void *arg)
1440 {
1441 	struct oce_wq *wq = (struct oce_wq *)arg;
1442 	POCE_SOFTC sc = wq->parent;
1443 	struct oce_cq *cq = wq->cq;
1444 	struct oce_nic_tx_cqe *cqe;
1445 	int num_cqes = 0;
1446 
1447 	LOCK(&wq->tx_compl_lock);
1448 	bus_dmamap_sync(cq->ring->dma.tag,
1449 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1450 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1451 	while (cqe->u0.dw[3]) {
1452 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1453 
1454 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1455 		if (wq->ring->cidx >= wq->ring->num_items)
1456 			wq->ring->cidx -= wq->ring->num_items;
1457 
1458 		oce_process_tx_completion(wq);
1459 		wq->tx_stats.tx_compl++;
1460 		cqe->u0.dw[3] = 0;
1461 		RING_GET(cq->ring, 1);
1462 		bus_dmamap_sync(cq->ring->dma.tag,
1463 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1464 		cqe =
1465 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1466 		num_cqes++;
1467 	}
1468 
1469 	if (num_cqes)
1470 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1471 
1472 	UNLOCK(&wq->tx_compl_lock);
1473 	return num_cqes;
1474 }
1475 
1476 
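/* Drain the per-WQ buf_ring using the drbr peek/advance/putback protocol:
 * a packet that oce_tx() fails to send is either dropped (consumed) or put
 * back at the head of the ring for the restart task. */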
1477 static int
1478 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1479 {
1480 	POCE_SOFTC sc = ifp->if_softc;
1481 	int status = 0, queue_index = 0;
1482 	struct mbuf *next = NULL;
1483 	struct buf_ring *br = NULL;
1484 
1485 	br  = wq->br;
1486 	queue_index = wq->queue_index;
1487 
1488 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1489 		IFF_DRV_RUNNING) {
1490 		if (m != NULL)
1491 			status = drbr_enqueue(ifp, br, m);
1492 		return status;
1493 	}
1494 
1495 	if (m != NULL) {
1496 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1497 			return status;
1498 	}
1499 	while ((next = drbr_peek(ifp, br)) != NULL) {
1500 		if (oce_tx(sc, &next, queue_index)) {
1501 			if (next == NULL) {
1502 				drbr_advance(ifp, br);
1503 			} else {
1504 				drbr_putback(ifp, br, next);
1505 				wq->tx_stats.tx_stops ++;
1506 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1507 			}
1508 			break;
1509 		}
1510 		drbr_advance(ifp, br);
1511 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1512 		if (next->m_flags & M_MCAST)
1513 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1514 		ETHER_BPF_MTAP(ifp, next);
1515 	}
1516 
1517 	return 0;
1518 }
1519 
1520 
1521 
1522 
1523 /*****************************************************************************
1524  *			    Receive routine functions			     *
1525  *****************************************************************************/
1526 
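/* Hardware LRO coalesces several TCP segments into one frame but leaves the
 * headers of the first segment in place; rewrite the IP length/lifespan and
 * the TCP ack, window, PUSH flag and timestamp option so the headers
 * describe the coalesced payload. */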
1527 static void
1528 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1529 {
1530 	uint32_t *p;
1531         struct ether_header *eh = NULL;
1532         struct tcphdr *tcp_hdr = NULL;
1533         struct ip *ip4_hdr = NULL;
1534         struct ip6_hdr *ip6 = NULL;
1535         uint32_t payload_len = 0;
1536 
1537         eh = mtod(m, struct ether_header *);
1538         /* correct IP header */
1539         if(!cqe2->ipv6_frame) {
1540 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1541                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1542                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1543                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1544         }else {
1545         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1546                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1547                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1548                                                 - sizeof(struct ip6_hdr);
1549                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1550                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1551         }
1552 
1553         /* correct tcp header */
1554         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1555         if(cqe2->push) {
1556         	tcp_hdr->th_flags |= TH_PUSH;
1557         }
1558         tcp_hdr->th_win = htons(cqe2->tcp_window);
1559         tcp_hdr->th_sum = 0xffff;
1560         if(cqe2->ts_opt) {
1561                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1562                 *p = cqe1->tcp_timestamp_val;
1563                 *(p+1) = cqe1->tcp_timestamp_ecr;
1564         }
1565 
1566 	return;
1567 }
1568 
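/* Assemble the fragments of one received frame into an mbuf chain: pop one
 * packet descriptor per fragment, fix up the lengths, and set the
 * checksum-offload flags on the head mbuf. */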
1569 static void
1570 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1571 {
1572 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1573         uint32_t i = 0, frag_len = 0;
1574 	uint32_t len = cqe_info->pkt_size;
1575         struct oce_packet_desc *pd;
1576         struct mbuf *tail = NULL;
1577 
1578         for (i = 0; i < cqe_info->num_frags; i++) {
1579                 if (rq->ring->cidx == rq->ring->pidx) {
1580                         device_printf(sc->dev,
1581                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1582                         return;
1583                 }
1584                 pd = &rq->pckts[rq->ring->cidx];
1585 
1586                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1587                 bus_dmamap_unload(rq->tag, pd->map);
1588 		RING_GET(rq->ring, 1);
1589                 rq->pending--;
1590 
1591                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1592                 pd->mbuf->m_len = frag_len;
1593 
1594                 if (tail != NULL) {
1595                         /* additional fragments */
1596                         pd->mbuf->m_flags &= ~M_PKTHDR;
1597                         tail->m_next = pd->mbuf;
1598 			if(rq->islro)
1599                         	tail->m_nextpkt = NULL;
1600                         tail = pd->mbuf;
1601                 } else {
1602                         /* first fragment, fill out much of the packet header */
1603                         pd->mbuf->m_pkthdr.len = len;
1604 			if(rq->islro)
1605                         	pd->mbuf->m_nextpkt = NULL;
1606                         pd->mbuf->m_pkthdr.csum_flags = 0;
1607                         if (IF_CSUM_ENABLED(sc)) {
1608                                 if (cqe_info->l4_cksum_pass) {
1609                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1610                                                 pd->mbuf->m_pkthdr.csum_flags |=
1611                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1612                                         }else { /* IPV6 frame */
1613 						if(rq->islro) {
1614                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1615                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1616 						}
1617                                         }
1618                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1619                                 }
1620                                 if (cqe_info->ip_cksum_pass) {
1621                                         pd->mbuf->m_pkthdr.csum_flags |=
1622                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1623                                 }
1624                         }
1625                         *m = tail = pd->mbuf;
1626                }
1627                 pd->mbuf = NULL;
1628                 len -= frag_len;
1629         }
1630 
1631         return;
1632 }
1633 
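/* A hardware-LRO completion arrives either as a single singleton CQE or as
 * a part1/part2 CQE pair; normalize whichever form we got into cq_info
 * before chaining the fragments. */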
1634 static void
1635 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1636 {
1637         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1638         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1639         struct mbuf *m = NULL;
1640 	struct oce_common_cqe_info cq_info;
1641 
1642 	/* parse cqe */
1643         if(cqe2 == NULL) {
1644                 cq_info.pkt_size =  cqe->pkt_size;
1645                 cq_info.vtag = cqe->vlan_tag;
1646                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1647                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1648                 cq_info.ipv6_frame = cqe->ipv6_frame;
1649                 cq_info.vtp = cqe->vtp;
1650                 cq_info.qnq = cqe->qnq;
1651         }else {
1652                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1653                 cq_info.pkt_size =  cqe2->coalesced_size;
1654                 cq_info.vtag = cqe2->vlan_tag;
1655                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1656                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1657                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1658                 cq_info.vtp = cqe2->vtp;
1659                 cq_info.qnq = cqe1->qnq;
1660         }
1661 
1662 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1663 
1664         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1665         if(cq_info.pkt_size % rq->cfg.frag_size)
1666                 cq_info.num_frags++;
1667 
1668 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1669 
1670 	if (m) {
1671 		if(cqe2) {
1672 			//assert(cqe2->valid != 0);
1673 
1674 			//assert(cqe2->cqe_type != 2);
1675 			oce_correct_header(m, cqe1, cqe2);
1676 		}
1677 
1678 		m->m_pkthdr.rcvif = sc->ifp;
1679 #if __FreeBSD_version >= 800000
1680 		if (rq->queue_index)
1681 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1682 		else
1683 			m->m_pkthdr.flowid = rq->queue_index;
1684 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1685 #endif
1686 		/* This determines if the vlan tag is valid */
1687 		if (cq_info.vtp) {
1688 			if (sc->function_mode & FNM_FLEX10_MODE) {
1689 				/* FLEX10. If QnQ is not set, neglect VLAN */
1690 				if (cq_info.qnq) {
1691 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1692 					m->m_flags |= M_VLANTAG;
1693 				}
1694 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1695 				/* In UMC mode the pvid is generally stripped
1696 				   by hw, but in some cases we have seen it
1697 				   arrive with the pvid intact. So if pvid ==
1698 				   vlan, ignore the vlan.
1698 				 */
1699 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1700 				m->m_flags |= M_VLANTAG;
1701 			}
1702 		}
1703 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1704 
1705 		(*sc->ifp->if_input) (sc->ifp, m);
1706 
1707 		/* Update rx stats per queue */
1708 		rq->rx_stats.rx_pkts++;
1709 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1710 		rq->rx_stats.rx_frags += cq_info.num_frags;
1711 		rq->rx_stats.rx_ucast_pkts++;
1712 	}
1713         return;
1714 }
1715 
1716 static void
1717 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1718 {
1719 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1720 	int len;
1721 	struct mbuf *m = NULL;
1722 	struct oce_common_cqe_info cq_info;
1723 	uint16_t vtag = 0;
1724 
1725 	/* Is it a flush completion with no data? */
1726 	if(!cqe->u0.s.num_fragments)
1727 		goto exit;
1728 
1729 	len = cqe->u0.s.pkt_size;
1730 	if (!len) {
1731 		/* partial DMA workaround for Lancer */
1732 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1733 		goto exit;
1734 	}
1735 
1736 	if (!oce_cqe_portid_valid(sc, cqe)) {
1737 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1738 		goto exit;
1739 	}
1740 
1741 	 /* Get vlan_tag value */
1742 	if(IS_BE(sc) || IS_SH(sc))
1743 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1744 	else
1745 		vtag = cqe->u0.s.vlan_tag;
1746 
1747 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1748 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1749 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1750 	cq_info.num_frags = cqe->u0.s.num_fragments;
1751 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1752 
1753 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1754 
1755 	if (m) {
1756 		m->m_pkthdr.rcvif = sc->ifp;
1757 #if __FreeBSD_version >= 800000
1758 		if (rq->queue_index)
1759 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1760 		else
1761 			m->m_pkthdr.flowid = rq->queue_index;
1762 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1763 #endif
1764 		/* This determines if the vlan tag is valid */
1765 		if (oce_cqe_vtp_valid(sc, cqe)) {
1766 			if (sc->function_mode & FNM_FLEX10_MODE) {
1767 				/* FLEX10. If QnQ is not set, neglect VLAN */
1768 				if (cqe->u0.s.qnq) {
1769 					m->m_pkthdr.ether_vtag = vtag;
1770 					m->m_flags |= M_VLANTAG;
1771 				}
1772 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1773 				/* In UMC mode the pvid is generally stripped
1774 				   by hw, but in some cases we have seen it
1775 				   arrive with the pvid intact. So if pvid ==
1776 				   vlan, ignore the vlan.
1776 				*/
1777 				m->m_pkthdr.ether_vtag = vtag;
1778 				m->m_flags |= M_VLANTAG;
1779 			}
1780 		}
1781 
1782 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1783 #if defined(INET6) || defined(INET)
1784 		/* Try to queue to LRO */
1785 		if (IF_LRO_ENABLED(sc) &&
1786 		    (cqe->u0.s.ip_cksum_pass) &&
1787 		    (cqe->u0.s.l4_cksum_pass) &&
1788 		    (!cqe->u0.s.ip_ver)       &&
1789 		    (rq->lro.lro_cnt != 0)) {
1790 
1791 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1792 				rq->lro_pkts_queued ++;
1793 				goto post_done;
1794 			}
1795 			/* If LRO posting fails then try to post to STACK */
1796 		}
1797 #endif
1798 
1799 		(*sc->ifp->if_input) (sc->ifp, m);
1800 #if defined(INET6) || defined(INET)
1801 post_done:
1802 #endif
1803 		/* Update rx stats per queue */
1804 		rq->rx_stats.rx_pkts++;
1805 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1806 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1807 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1808 			rq->rx_stats.rx_mcast_pkts++;
1809 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1810 			rq->rx_stats.rx_ucast_pkts++;
1811 	}
1812 exit:
1813 	return;
1814 }
1815 
1816 
1817 void
1818 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1819 {
1820 	uint32_t i = 0;
1821 	struct oce_packet_desc *pd;
1822 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1823 
1824 	for (i = 0; i < num_frags; i++) {
1825                 if (rq->ring->cidx == rq->ring->pidx) {
1826                         device_printf(sc->dev,
1827                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1828                         return;
1829                 }
1830                 pd = &rq->pckts[rq->ring->cidx];
1831                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1832                 bus_dmamap_unload(rq->tag, pd->map);
1833                 if (pd->mbuf != NULL) {
1834                         m_freem(pd->mbuf);
1835                         pd->mbuf = NULL;
1836                 }
1837 
1838 		RING_GET(rq->ring, 1);
1839                 rq->pending--;
1840 	}
1841 }
1842 
1843 
1844 static int
1845 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1846 {
1847 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1848 	int vtp = 0;
1849 
1850 	if (sc->be3_native) {
1851 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1852 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1853 	} else
1854 		vtp = cqe->u0.s.vlan_tag_present;
1855 
1856 	return vtp;
1857 
1858 }
1859 
1860 
1861 static int
1862 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1863 {
1864 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1865 	int port_id = 0;
1866 
1867 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1868 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1869 		port_id =  cqe_v1->u0.s.port;
1870 		if (sc->port_id != port_id)
1871 			return 0;
1872 	} else
1873 		; /* For BE3 legacy and Lancer this check is a no-op */
1874 
1875 	return 1;
1876 
1877 }
1878 
1879 #if defined(INET6) || defined(INET)
1880 void
1881 oce_rx_flush_lro(struct oce_rq *rq)
1882 {
1883 	struct lro_ctrl	*lro = &rq->lro;
1884 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1885 
1886 	if (!IF_LRO_ENABLED(sc))
1887 		return;
1888 
1889 	tcp_lro_flush_all(lro);
1890 	rq->lro_pkts_queued = 0;
1891 
1892 	return;
1893 }
1894 
1895 
1896 static int
1897 oce_init_lro(POCE_SOFTC sc)
1898 {
1899 	struct lro_ctrl *lro = NULL;
1900 	int i = 0, rc = 0;
1901 
1902 	for (i = 0; i < sc->nrqs; i++) {
1903 		lro = &sc->rq[i]->lro;
1904 		rc = tcp_lro_init(lro);
1905 		if (rc != 0) {
1906 			device_printf(sc->dev, "LRO init failed\n");
1907 			return rc;
1908 		}
1909 		lro->ifp = sc->ifp;
1910 	}
1911 
1912 	return rc;
1913 }
1914 
1915 
1916 void
1917 oce_free_lro(POCE_SOFTC sc)
1918 {
1919 	struct lro_ctrl *lro = NULL;
1920 	int i = 0;
1921 
1922 	for (i = 0; i < sc->nrqs; i++) {
1923 		lro = &sc->rq[i]->lro;
1924 		if (lro)
1925 			tcp_lro_free(lro);
1926 	}
1927 }
1928 #endif
1929 
1930 int
1931 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1932 {
1933 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1934 	int i, in, rc;
1935 	struct oce_packet_desc *pd;
1936 	bus_dma_segment_t segs[6];
1937 	int nsegs, added = 0;
1938 	struct oce_nic_rqe *rqe;
1939 	pd_rxulp_db_t rxdb_reg;
1940 	uint32_t val = 0;
1941 	uint32_t oce_max_rq_posts = 64;
1942 
1943 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1944 	for (i = 0; i < count; i++) {
1945 		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1946 
1947 		pd = &rq->pckts[rq->ring->pidx];
1948 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1949 		if (pd->mbuf == NULL) {
1950 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1951 			break;
1952 		}
1953 		pd->mbuf->m_nextpkt = NULL;
1954 
1955 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1956 
1957 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1958 					     pd->map,
1959 					     pd->mbuf,
1960 					     segs, &nsegs, BUS_DMA_NOWAIT);
1961 		if (rc) {
1962 			m_free(pd->mbuf);
			pd->mbuf = NULL;
1963 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1964 			break;
1965 		}
1967 		if (nsegs != 1) {
			/* a single DMA segment is expected; undo the load and retry */
			bus_dmamap_unload(rq->tag, pd->map);
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
1968 			i--;
1969 			continue;
1970 		}
1971 
1972 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1973 
1974 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1975 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1976 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1977 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1978 		RING_PUT(rq->ring, 1);
1979 		added++;
1980 		rq->pending++;
1981 	}
1982 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1983 	if (added != 0) {
1984 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1985 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1986 			rxdb_reg.bits.qid = rq->rq_id;
1987 			if (rq->islro) {
1988 				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
1989 				val |= oce_max_rq_posts << 16;
1990 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1991 			} else {
1992 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1993 			}
1994 			added -= oce_max_rq_posts;
1995 		}
1996 		if (added > 0) {
1997 			rxdb_reg.bits.qid = rq->rq_id;
1998 			rxdb_reg.bits.num_posted = added;
1999 			if (rq->islro) {
2000 				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
2001 				val |= added << 16;
2002 				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
2003 			} else {
2004 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
2005 			}
2006 		}
2007 	}
2008 
2009 	return 0;
2010 }
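
/*
 * Worked example for the doorbell batching above (illustration only): with
 * added = 150 newly posted buffers and oce_max_rq_posts = 64, the loop
 * rings the doorbell twice for 64 buffers each and the trailing
 * "if (added > 0)" rings it once more for the remaining 22. For an LRO RQ
 * the count is packed into bits 16 and up of the DB_OFFSET register
 * alongside the RQ id; otherwise the structured PD_RXULP_DB register is
 * written.
 */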
2011 
2012 static void
2013 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
2014 {
2015 	if (num_cqes) {
2016 		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
2017 		if (!sc->enable_hwlro) {
2018 			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
2019 				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
2020 		} else {
2021 			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
2022 				oce_alloc_rx_bufs(rq, 64);
2023 		}
2024 	}
2025 
2026 	return;
2027 }
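
/*
 * Replenish policy used above, spelled out: without hardware LRO the RQ is
 * topped up in one shot to OCE_RQ_PACKET_ARRAY_SIZE - 1 buffers; with
 * hardware LRO, buffers are posted in fixed batches of 64, and only once at
 * least 64 free slots have accumulated.
 */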
2028 
2029 uint16_t
2030 oce_rq_handler_lro(void *arg)
2031 {
2032         struct oce_rq *rq = (struct oce_rq *)arg;
2033         struct oce_cq *cq = rq->cq;
2034         POCE_SOFTC sc = rq->parent;
2035         struct nic_hwlro_singleton_cqe *cqe;
2036         struct nic_hwlro_cqe_part2 *cqe2;
2037         int num_cqes = 0;
2038 
2039 	LOCK(&rq->rx_lock);
2040 	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2041 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2042 	while (cqe->valid) {
2043 		if (cqe->cqe_type == 0) { /* singleton cqe */
2044 			/* we should never see a singleton cqe after cqe1 on the same rq */
2045 			if (rq->cqe_firstpart != NULL) {
2046 				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
2047 				goto exit_rq_handler_lro;
2048 			}
2049 			if (cqe->error != 0) {
2050 				rq->rx_stats.rxcp_err++;
2051 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2052 			}
2053 			oce_rx_lro(rq, cqe, NULL);
2054 			rq->rx_stats.rx_compl++;
2055 			cqe->valid = 0;
2056 			RING_GET(cq->ring, 1);
2057 			num_cqes++;
2058 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2059 				break;
2060 		} else if (cqe->cqe_type == 0x1) { /* first part */
2061 			/* we should never see cqe1 after cqe1 on the same rq */
2062 			if (rq->cqe_firstpart != NULL) {
2063 				device_printf(sc->dev, "Got cqe1 after cqe1\n");
2064 				goto exit_rq_handler_lro;
2065 			}
2066 			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
2067 			RING_GET(cq->ring, 1);
2068 		} else if (cqe->cqe_type == 0x2) { /* second part */
2069 			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
2070 			if (cqe2->error != 0) {
2071 				rq->rx_stats.rxcp_err++;
2072 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2073 			}
2074 			/* we should never see cqe2 without cqe1 */
2075 			if (rq->cqe_firstpart == NULL) {
2076 				device_printf(sc->dev, "Got cqe2 without cqe1\n");
2077 				goto exit_rq_handler_lro;
2078 			}
2079 			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2080 
2081 			rq->rx_stats.rx_compl++;
2082 			rq->cqe_firstpart->valid = 0;
2083 			cqe2->valid = 0;
2084 			rq->cqe_firstpart = NULL;
2085 
2086 			RING_GET(cq->ring, 1);
2087 			num_cqes += 2;
2088 			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2089 				break;
2090 		}
2091 
2092 		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2093 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2094 	}
2095 	oce_check_rx_bufs(sc, num_cqes, rq);
2096 exit_rq_handler_lro:
2097 	UNLOCK(&rq->rx_lock);
2098 	return 0;
2099 }
2100 
2101 /* Handle the Completion Queue for receive */
2102 uint16_t
2103 oce_rq_handler(void *arg)
2104 {
2105 	struct oce_rq *rq = (struct oce_rq *)arg;
2106 	struct oce_cq *cq = rq->cq;
2107 	POCE_SOFTC sc = rq->parent;
2108 	struct oce_nic_rx_cqe *cqe;
2109 	int num_cqes = 0;
2110 
2111 	if (rq->islro) {
2112 		oce_rq_handler_lro(arg);
2113 		return 0;
2114 	}
2115 	LOCK(&rq->rx_lock);
2116 	bus_dmamap_sync(cq->ring->dma.tag,
2117 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2118 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2119 	while (cqe->u0.dw[2]) {
2120 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2121 
2122 		if (cqe->u0.s.error == 0) {
2123 			oce_rx(rq, cqe);
2124 		} else {
2125 			rq->rx_stats.rxcp_err++;
2126 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2127 			/* Post L3/L4 errors to stack.*/
2128 			oce_rx(rq, cqe);
2129 		}
2130 		rq->rx_stats.rx_compl++;
2131 		cqe->u0.dw[2] = 0;
2132 
2133 #if defined(INET6) || defined(INET)
2134 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2135 			oce_rx_flush_lro(rq);
2136 		}
2137 #endif
2138 
2139 		RING_GET(cq->ring, 1);
2140 		bus_dmamap_sync(cq->ring->dma.tag,
2141 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2142 		cqe =
2143 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2144 		num_cqes++;
2145 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2146 			break;
2147 	}
2148 
2149 #if defined(INET6) || defined(INET)
2150 	if (IF_LRO_ENABLED(sc))
2151 		oce_rx_flush_lro(rq);
2152 #endif
2153 
2154 	oce_check_rx_bufs(sc, num_cqes, rq);
2155 	UNLOCK(&rq->rx_lock);
2156 	return 0;
2157 
2158 }
2159 
2160 /*****************************************************************************
2161  *			Helper functions used in this file		     *
2162  *****************************************************************************/
2166 
2167 static int
2168 oce_attach_ifp(POCE_SOFTC sc)
2169 {
2170 
2171 	sc->ifp = if_alloc(IFT_ETHER);
2172 	if (!sc->ifp)
2173 		return ENOMEM;
2174 
2175 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2176 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2177 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2178 
2179 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
2180 	sc->ifp->if_ioctl = oce_ioctl;
2181 	sc->ifp->if_start = oce_start;
2182 	sc->ifp->if_init = oce_init;
2183 	sc->ifp->if_mtu = ETHERMTU;
2184 	sc->ifp->if_softc = sc;
2185 #if __FreeBSD_version >= 800000
2186 	sc->ifp->if_transmit = oce_multiq_start;
2187 	sc->ifp->if_qflush = oce_multiq_flush;
2188 #endif
2189 
2190 	if_initname(sc->ifp,
2191 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2192 
2193 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2194 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2195 	IFQ_SET_READY(&sc->ifp->if_snd);
2196 
2197 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2198 	sc->ifp->if_hwassist |= CSUM_TSO;
2199 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2200 
2201 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2202 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2203 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2204 
2205 #if defined(INET6) || defined(INET)
2206 	sc->ifp->if_capabilities |= IFCAP_TSO;
2207 	sc->ifp->if_capabilities |= IFCAP_LRO;
2208 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2209 #endif
2210 
2211 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2212 	sc->ifp->if_baudrate = IF_Gbps(10);
2213 
2214 #if __FreeBSD_version >= 1000000
2215 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2216 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2217 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2218 #endif
2219 
2220 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2221 
2222 	return 0;
2223 }
2224 
2225 
2226 static void
2227 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2228 {
2229 	POCE_SOFTC sc = ifp->if_softc;
2230 
2231 	if (ifp->if_softc !=  arg)
2232 		return;
2233 	if ((vtag == 0) || (vtag > 4095))
2234 		return;
2235 
2236 	sc->vlan_tag[vtag] = 1;
2237 	sc->vlans_added++;
2238 	if (sc->vlans_added <= (sc->max_vlans + 1))
2239 		oce_vid_config(sc);
2240 }
2241 
2242 
2243 static void
2244 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2245 {
2246 	POCE_SOFTC sc = ifp->if_softc;
2247 
2248 	if (ifp->if_softc !=  arg)
2249 		return;
2250 	if ((vtag == 0) || (vtag > 4095))
2251 		return;
2252 
2253 	sc->vlan_tag[vtag] = 0;
2254 	sc->vlans_added--;
2255 	oce_vid_config(sc);
2256 }
2257 
2258 
2259 /*
2260  * A max of 64 vlans can be configured in BE. If the user configures
2261  * more, place the card in vlan promiscuous mode.
2262  */
2263 static int
2264 oce_vid_config(POCE_SOFTC sc)
2265 {
2266 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2267 	uint16_t ntags = 0, i;
2268 	int status = 0;
2269 
2270 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2271 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2272 		for (i = 0; i < MAX_VLANS; i++) {
2273 			if (sc->vlan_tag[i]) {
2274 				vtags[ntags].vtag = i;
2275 				ntags++;
2276 			}
2277 		}
2278 		if (ntags)
2279 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2280 						vtags, ntags, 1, 0);
2281 	} else
2282 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2283 					 	NULL, 0, 1, 1);
2284 	return status;
2285 }
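
/*
 * Example of the fallback above: the hardware filter holds 64 entries, so
 * once a 65th vlan is configured (or IFCAP_VLAN_HWFILTER is disabled) the
 * else branch calls oce_config_vlan() with an empty tag list, which, per
 * the comment above, places the card in vlan promiscuous mode so that every
 * vlan is accepted.
 */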
2286 
2287 
2288 static void
2289 oce_mac_addr_set(POCE_SOFTC sc)
2290 {
2291 	uint32_t old_pmac_id = sc->pmac_id;
2292 	int status = 0;
2293 
2294 
2295 	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2296 			 sc->macaddr.size_of_struct);
2297 	if (!status)
2298 		return;
2299 
2300 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2301 					sc->if_id, &sc->pmac_id);
2302 	if (!status) {
2303 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2304 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2305 				 sc->macaddr.size_of_struct);
2306 	}
2307 	if (status)
2308 		device_printf(sc->dev, "Failed to update MAC address\n");
2309 
2310 }
2311 
2312 
2313 static int
2314 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2315 {
2316 	POCE_SOFTC sc = ifp->if_softc;
2317 	struct ifreq *ifr = (struct ifreq *)data;
2318 	int rc = ENXIO;
2319 	char cookie[32] = {0};
2320 	void *priv_data = ifr_data_get_ptr(ifr);
2321 	void *ioctl_ptr;
2322 	uint32_t req_size;
2323 	struct mbx_hdr req;
2324 	OCE_DMA_MEM dma_mem;
2325 	struct mbx_common_get_cntl_attr *fw_cmd;
2326 
2327 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2328 		return EFAULT;
2329 
2330 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2331 		return EINVAL;
2332 
2333 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2334 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2335 		return EFAULT;
2336 
2337 	req_size = le32toh(req.u0.req.request_length);
2338 	if (req_size > 65536)
2339 		return EINVAL;
2340 
2341 	req_size += sizeof(struct mbx_hdr);
2342 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2343 	if (rc)
2344 		return ENOMEM;
2345 
2346 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2347 		rc = EFAULT;
2348 		goto dma_free;
2349 	}
2350 
2351 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2352 	if (rc) {
2353 		rc = EIO;
2354 		goto dma_free;
2355 	}
2356 
2357 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
2358 		rc =  EFAULT;
2359 
2360 	/*
2361 	 * The firmware fills in all the attributes for this ioctl except
2362 	 * the driver version, so fill that in here.
2363 	 */
2364 	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2365 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2366 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2367 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2368 	}
2369 
2370 dma_free:
2371 	oce_dma_free(sc, &dma_mem);
2372 	return rc;
2373 
2374 }
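
/*
 * Layout of the user buffer consumed by oce_handle_passthrough() above, as
 * implied by the copyin()/copyout() calls (a sketch, not a formal ABI
 * description):
 *
 *   +----------------+------------------+----------------------------+
 *   | IOCTL_COOKIE   | struct mbx_hdr   | request/response payload   |
 *   +----------------+------------------+----------------------------+
 *
 * Everything past the cookie (the header plus request_length bytes, capped
 * at 64 KB) is copied into DMA-able memory, handed to the firmware through
 * the mailbox, and copied back out over the same user addresses.
 */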
2375 
2376 static void
2377 oce_eqd_set_periodic(POCE_SOFTC sc)
2378 {
2379 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2380 	struct oce_aic_obj *aic;
2381 	struct oce_eq *eqo;
2382 	uint64_t now = 0, delta;
2383 	int eqd, i, num = 0;
2384 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2385 	struct oce_wq *wq;
2386 	struct oce_rq *rq;
2387 
2388 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2389 
2390 	for (i = 0 ; i < sc->neqs; i++) {
2391 		eqo = sc->eq[i];
2392 		aic = &sc->aic_obj[i];
2393 		/* When setting the static eq delay from the user space */
2394 		if (!aic->enable) {
2395 			if (aic->ticks)
2396 				aic->ticks = 0;
2397 			eqd = aic->et_eqd;
2398 			goto modify_eqd;
2399 		}
2400 
2401 		if (i == 0) {
2402 			rq = sc->rq[0];
2403 			rxpkts = rq->rx_stats.rx_pkts;
2404 		} else
2405 			rxpkts = 0;
2406 		if (i + 1 < sc->nrqs) {
2407 			rq = sc->rq[i + 1];
2408 			rxpkts += rq->rx_stats.rx_pkts;
2409 		}
2410 		if (i < sc->nwqs) {
2411 			wq = sc->wq[i];
2412 			tx_reqs = wq->tx_stats.tx_reqs;
2413 		} else
2414 			tx_reqs = 0;
2415 		now = ticks;
2416 
2417 		if (!aic->ticks || now < aic->ticks ||
2418 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2419 			aic->prev_rxpkts = rxpkts;
2420 			aic->prev_txreqs = tx_reqs;
2421 			aic->ticks = now;
2422 			continue;
2423 		}
2424 
2425 		delta = ticks_to_msecs(now - aic->ticks);
		if (delta == 0)		/* avoid dividing by a zero interval */
			continue;
2426 
2427 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2428 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2429 		eqd = (pps / 15000) << 2;
2430 		if (eqd < 8)
2431 			eqd = 0;
2432 
2433 		/* Make sure that the eq delay is in the known range */
2434 		eqd = min(eqd, aic->max_eqd);
2435 		eqd = max(eqd, aic->min_eqd);
2436 
2437 		aic->prev_rxpkts = rxpkts;
2438 		aic->prev_txreqs = tx_reqs;
2439 		aic->ticks = now;
2440 
2441 modify_eqd:
2442 		if (eqd != aic->cur_eqd) {
2443 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2444 			set_eqd[num].eq_id = eqo->eq_id;
2445 			aic->cur_eqd = eqd;
2446 			num++;
2447 		}
2448 	}
2449 
2450 	/* Modify the delay for any EQ that changed, at most 8 per mailbox command */
2451 	for (i = 0; i < num; i += 8) {
2452 		if ((num - i) >= 8)
2453 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
2454 		else
2455 			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2456 	}
2457 
2458 }
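
/*
 * Worked example of the adaptive EQ-delay heuristic above (illustration
 * only, not compiled):
 */
#if 0
	pps = 120000;			/* combined RX + TX packets/s */
	eqd = (pps / 15000) << 2;	/* 8 << 2 = 32 */
	if (eqd < 8)			/* under ~30000 pkts/s this */
		eqd = 0;		/* disables coalescing entirely */
	eqd = min(eqd, aic->max_eqd);	/* clamp to the advertised range */
	eqd = max(eqd, aic->min_eqd);
	/* the chip is finally programmed with (eqd * 65) / 100 */
#endif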
2459 
2460 static void oce_detect_hw_error(POCE_SOFTC sc)
2461 {
2462 
2463 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2464 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2465 	uint32_t i;
2466 
2467 	if (sc->hw_error)
2468 		return;
2469 
2470 	if (IS_XE201(sc)) {
2471 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2472 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2473 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2474 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2475 		}
2476 	} else {
2477 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2478 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2479 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2480 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2481 
2482 		ue_low = (ue_low & ~ue_low_mask);
2483 		ue_high = (ue_high & ~ue_high_mask);
2484 	}
2485 
2486 	/* On certain platforms BE hardware can report spurious UEs.
2487 	 * A real UE makes the hardware stop working on its own, so
2488 	 * hw_error is deliberately not set when only a UE is seen.
2489 	 */
2490 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2491 		sc->hw_error = TRUE;
2492 		device_printf(sc->dev, "Error detected in the card\n");
2493 		device_printf(sc->dev,
2494 				"ERR: sliport status 0x%x\n", sliport_status);
2495 		device_printf(sc->dev,
2496 				"ERR: sliport error1 0x%x\n", sliport_err1);
2497 		device_printf(sc->dev,
2498 				"ERR: sliport error2 0x%x\n", sliport_err2);
2499 	}
2503 
2504 	if (ue_low) {
2505 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2506 			if (ue_low & 1)
2507 				device_printf(sc->dev, "UE: %s bit set\n",
2508 							ue_status_low_desc[i]);
2509 		}
2510 	}
2511 
2512 	if (ue_high) {
2513 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2514 			if (ue_high & 1)
2515 				device_printf(sc->dev, "UE: %s bit set\n",
2516 							ue_status_hi_desc[i]);
2517 		}
2518 	}
2519 
2520 }
2521 
2522 
2523 static void
2524 oce_local_timer(void *arg)
2525 {
2526 	POCE_SOFTC sc = arg;
2527 	int i = 0;
2528 
2529 	oce_detect_hw_error(sc);
2530 	oce_refresh_nic_stats(sc);
2531 	oce_refresh_queue_stats(sc);
2532 	oce_mac_addr_set(sc);
2533 
2534 	/* TX watchdog */
2535 	for (i = 0; i < sc->nwqs; i++)
2536 		oce_tx_restart(sc, sc->wq[i]);
2537 
2538 	/* calculate and set the eq delay for optimal interrupt rate */
2539 	if (IS_BE(sc) || IS_SH(sc))
2540 		oce_eqd_set_periodic(sc);
2541 
2542 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2543 }
2544 
2545 static void
2546 oce_tx_compl_clean(POCE_SOFTC sc)
2547 {
2548 	struct oce_wq *wq;
2549 	int i = 0, timeo = 0, num_wqes = 0;
2550 	int pending_txqs = sc->nwqs;
2551 
2552 	/* Stop polling for completions when the hardware has been silent
2553 	 * for 10ms, a hardware error is flagged, or no outstanding
2554 	 * completions remain. */
2555 	do {
2556 		pending_txqs = sc->nwqs;
2557 
2558 		for_all_wq_queues(sc, wq, i) {
2559 			num_wqes = oce_wq_handler(wq);
2560 
2561 			if (num_wqes)
2562 				timeo = 0;
2563 
2564 			if (!wq->ring->num_used)
2565 				pending_txqs--;
2566 		}
2567 
2568 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2569 			break;
2570 
2571 		DELAY(1000);
2572 	} while (TRUE);
2573 
2574 	for_all_wq_queues(sc, wq, i) {
2575 		while (wq->ring->num_used) {
2576 			LOCK(&wq->tx_compl_lock);
2577 			oce_process_tx_completion(wq);
2578 			UNLOCK(&wq->tx_compl_lock);
2579 		}
2580 	}
2581 
2582 }
2583 
2584 /* NOTE : This should only be called holding
2585  *        DEVICE_LOCK.
2586  */
2587 static void
2588 oce_if_deactivate(POCE_SOFTC sc)
2589 {
2590 	int i;
2591 	struct oce_rq *rq;
2592 	struct oce_wq *wq;
2593 	struct oce_eq *eq;
2594 
2595 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2596 
2597 	oce_tx_compl_clean(sc);
2598 
2599 	/* Stop intrs and finish any bottom halves pending */
2600 	oce_hw_intr_disable(sc);
2601 
2602 	/* Since taskqueue_drain takes the Giant lock, we should not hold
2603 	 * any other lock. So unlock the device lock and reacquire it
2604 	 * after taskqueue_drain completes.
2605 	 */
2606 	UNLOCK(&sc->dev_lock);
2607 	for (i = 0; i < sc->intr_count; i++) {
2608 		if (sc->intrs[i].tq != NULL) {
2609 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2610 		}
2611 	}
2612 	LOCK(&sc->dev_lock);
2613 
2614 	/* Delete RX queue in card with flush param */
2615 	oce_stop_rx(sc);
2616 
2617 	/* Invalidate any pending cq and eq entries*/
2618 	for_all_evnt_queues(sc, eq, i)
2619 		oce_drain_eq(eq);
2620 	for_all_rq_queues(sc, rq, i)
2621 		oce_drain_rq_cq(rq);
2622 	for_all_wq_queues(sc, wq, i)
2623 		oce_drain_wq_cq(wq);
2624 
2625 	/* We still need to receive MCC async events, so enable
2626 	 * interrupts again and arm the first EQ.
2627 	 */
2628 	oce_hw_intr_enable(sc);
2629 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2630 
2631 	DELAY(10);
2632 }
2633 
2634 
2635 static void
2636 oce_if_activate(POCE_SOFTC sc)
2637 {
2638 	struct oce_eq *eq;
2639 	struct oce_rq *rq;
2640 	struct oce_wq *wq;
2641 	int i, rc = 0;
2642 
2643 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2644 
2645 	oce_hw_intr_disable(sc);
2646 
2647 	oce_start_rx(sc);
2648 
2649 	for_all_rq_queues(sc, rq, i) {
2650 		rc = oce_start_rq(rq);
2651 		if (rc)
2652 			device_printf(sc->dev, "Unable to start RX\n");
2653 	}
2654 
2655 	for_all_wq_queues(sc, wq, i) {
2656 		rc = oce_start_wq(wq);
2657 		if (rc)
2658 			device_printf(sc->dev, "Unable to start TX\n");
2659 	}
2660 
2661 
2662 	for_all_evnt_queues(sc, eq, i)
2663 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2664 
2665 	oce_hw_intr_enable(sc);
2666 
2667 }
2668 
2669 static void
2670 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2671 {
2672 	/* Update Link status */
2673 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2674 	     ASYNC_EVENT_LINK_UP) {
2675 		sc->link_status = ASYNC_EVENT_LINK_UP;
2676 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2677 	} else {
2678 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2679 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2680 	}
2681 }
2682 
2683 
2684 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2685 					 struct oce_async_evt_grp5_os2bmc *evt)
2686 {
2687 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2688 	if (evt->u.s.mgmt_enable)
2689 		sc->flags |= OCE_FLAGS_OS2BMC;
2690 	else
2691 		return;
2692 
2693 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2694 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2695 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2696 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2697 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2698 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2699 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2700 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2701 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2702 }
2703 
2704 
2705 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2706 {
2707 	struct oce_async_event_grp5_pvid_state *gcqe;
2708 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2709 
2710 	switch (cqe->u0.s.async_type) {
2711 	case ASYNC_EVENT_PVID_STATE:
2712 		/* GRP5 PVID */
2713 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2714 		if (gcqe->enabled)
2715 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2716 		else
2717 			sc->pvid = 0;
2718 		break;
2719 	case ASYNC_EVENT_OS2BMC:
2720 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2721 		oce_async_grp5_osbmc_process(sc, bmccqe);
2722 		break;
2723 	default:
2724 		break;
2725 	}
2726 }
2727 
2728 /* Handle the Completion Queue for the Mailbox/Async notifications */
2729 uint16_t
2730 oce_mq_handler(void *arg)
2731 {
2732 	struct oce_mq *mq = (struct oce_mq *)arg;
2733 	POCE_SOFTC sc = mq->parent;
2734 	struct oce_cq *cq = mq->cq;
2735 	int num_cqes = 0, evt_type = 0, optype = 0;
2736 	struct oce_mq_cqe *cqe;
2737 	struct oce_async_cqe_link_state *acqe;
2738 	struct oce_async_event_qnq *dbgcqe;
2739 
2740 
2741 	bus_dmamap_sync(cq->ring->dma.tag,
2742 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2743 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2744 
2745 	while (cqe->u0.dw[3]) {
2746 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2747 		if (cqe->u0.s.async_event) {
2748 			evt_type = cqe->u0.s.event_type;
2749 			optype = cqe->u0.s.async_type;
2750 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2751 				/* Link status evt */
2752 				acqe = (struct oce_async_cqe_link_state *)cqe;
2753 				process_link_state(sc, acqe);
2754 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2755 				oce_process_grp5_events(sc, cqe);
2756 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2757 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2758 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2759 				if(dbgcqe->valid)
2760 					sc->qnqid = dbgcqe->vlan_tag;
2761 				sc->qnq_debug_event = TRUE;
2762 			}
2763 		}
2764 		cqe->u0.dw[3] = 0;
2765 		RING_GET(cq->ring, 1);
2766 		bus_dmamap_sync(cq->ring->dma.tag,
2767 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2768 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2769 		num_cqes++;
2770 	}
2771 
2772 	if (num_cqes)
2773 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2774 
2775 	return 0;
2776 }
2777 
2778 
2779 static void
2780 setup_max_queues_want(POCE_SOFTC sc)
2781 {
2782 	/* Check if this is a FLEX machine; if so, don't use RSS */
2783 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2784 	    (sc->function_mode & FNM_UMC_MODE)    ||
2785 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2786 	    (!is_rss_enabled(sc))		  ||
2787 	    IS_BE2(sc)) {
2788 		sc->nrqs = 1;
2789 		sc->nwqs = 1;
2790 	} else {
2791 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2792 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2793 	}
2794 
2795 	if (IS_BE2(sc) && is_rss_enabled(sc))
2796 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2797 }
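
/*
 * Example: if MIN(OCE_NCPUS, sc->nrssqs) evaluates to 4, the RSS branch
 * above asks for nwqs = 4 and nrqs = 5 (four RSS RQs plus, presumably, one
 * default RQ). The FLEX10/UMC/VNIC modes and disabled RSS fall back to a
 * single RQ/WQ pair, while BE2 keeps one WQ but, via the trailing override,
 * still gets the extra RQs when RSS is enabled.
 */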
2798 
2799 
2800 static void
2801 update_queues_got(POCE_SOFTC sc)
2802 {
2803 	if (is_rss_enabled(sc)) {
2804 		sc->nrqs = sc->intr_count + 1;
2805 		sc->nwqs = sc->intr_count;
2806 	} else {
2807 		sc->nrqs = 1;
2808 		sc->nwqs = 1;
2809 	}
2810 
2811 	if (IS_BE2(sc))
2812 		sc->nwqs = 1;
2813 }
2814 
2815 static int
2816 oce_check_ipv6_ext_hdr(struct mbuf *m)
2817 {
2818 	struct ether_header *eh = mtod(m, struct ether_header *);
2819 	caddr_t m_datatemp = m->m_data;
2820 
2821 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2822 		m->m_data += sizeof(struct ether_header);
2823 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2824 
2825 		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
2826 		    (ip6->ip6_nxt != IPPROTO_UDP)) {
2827 			struct ip6_ext *ip6e = NULL;
2828 			m->m_data += sizeof(struct ip6_hdr);
2829 
2830 			ip6e = mtod(m, struct ip6_ext *);
2831 			if (ip6e->ip6e_len == 0xff) {
2832 				m->m_data = m_datatemp;
2833 				return TRUE;
2834 			}
2835 		}
2836 		m->m_data = m_datatemp;
2837 	}
2838 	return FALSE;
2839 }
2840 
2841 static int
2842 is_be3_a1(POCE_SOFTC sc)
2843 {
2844 	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2845 		return TRUE;
2846 	}
2847 	return FALSE;
2848 }
2849 
2850 static struct mbuf *
2851 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2852 {
2853 	uint16_t vlan_tag = 0;
2854 
2855 	if (!M_WRITABLE(m))
2856 		return NULL;
2857 
2858 	/* Pull the vlan tag out of the packet header if one is attached */
2859 	if (m->m_flags & M_VLANTAG) {
2860 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2861 		m->m_flags &= ~M_VLANTAG;
2862 	}
2863 
2864 	/* In UMC mode, insert the pvid if the packet carries no vlan tag */
2865 	if (sc->pvid) {
2866 		if (!vlan_tag)
2867 			vlan_tag = sc->pvid;
2868 		if (complete)
2869 			*complete = FALSE;
2870 	}
2871 
2872 	if (vlan_tag)
2873 		m = ether_vlanencap(m, vlan_tag);
2874 
2875 	/* ether_vlanencap() frees the chain and returns NULL on failure */
2876 	if (m != NULL && sc->qnqid) {
2877 		m = ether_vlanencap(m, sc->qnqid);
2878 
2879 		if (complete)
2880 			*complete = FALSE;
2881 	}
2882 	return m;
2883 }
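
/*
 * Tag ordering note for the function above: ether_vlanencap() inserts a tag
 * directly behind the Ethernet addresses, so encapsulating vlan_tag first
 * and sc->qnqid second leaves the QnQ id as the outer tag:
 *
 *   [dst|src][qnqid][vlan_tag][orig ethertype][payload]
 *
 * In the pvid and QnQ cases *complete is cleared, which the transmit path
 * can use as a cue that the tag was inserted in software rather than
 * offloaded to the hardware.
 */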
2884 
2885 static int
2886 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2887 {
2888 	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
2889 	    oce_check_ipv6_ext_hdr(m)) {
2890 		return TRUE;
2891 	}
2892 	return FALSE;
2893 }
2894 
2895 static void
2896 oce_get_config(POCE_SOFTC sc)
2897 {
2898 	int rc = 0;
2899 	uint32_t max_rss = 0;
2900 
2901 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2902 		max_rss = OCE_LEGACY_MODE_RSS;
2903 	else
2904 		max_rss = OCE_MAX_RSS;
2905 
2906 	if (!IS_BE(sc)) {
2907 		rc = oce_get_profile_config(sc, max_rss);
2908 		if (rc) {
2909 			sc->nwqs = OCE_MAX_WQ;
2910 			sc->nrssqs = max_rss;
2911 			sc->nrqs = sc->nrssqs + 1;
2912 		}
2913 	} else {
2914 		/* For BE3, don't rely on the firmware to size the resources */
2915 		sc->nrssqs = max_rss;
2916 		sc->nrqs = sc->nrssqs + 1;
2917 		sc->nwqs = OCE_MAX_WQ;
2918 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2919 	}
2920 }
2921 
2922 static void
2923 oce_rdma_close(void)
2924 {
2925   oce_rdma_if = NULL;
2928 }
2929 
2930 static void
2931 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2932 {
2933   memcpy(macaddr, sc->macaddr.mac_addr, ETHER_ADDR_LEN);
2934 }
2935 
2936 int
2937 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2938 {
2939   POCE_SOFTC sc;
2940   struct oce_dev_info di;
2941   int i;
2942 
2943   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2944     return -EINVAL;
2945   }
2946 
2947   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2948       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2949     return -ENXIO;
2950   }
2951 
2952   rdma_info->close = oce_rdma_close;
2953   rdma_info->mbox_post = oce_mbox_post;
2954   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2955   rdma_info->get_mac_addr = oce_get_mac_addr;
2956 
2957   oce_rdma_if = rdma_if;
2958 
2959   sc = softc_head;
2960   while (sc != NULL) {
2961     if (oce_rdma_if->announce != NULL) {
2962       memset(&di, 0, sizeof(di));
2963       di.dev = sc->dev;
2964       di.softc = sc;
2965       di.ifp = sc->ifp;
2966       di.db_bhandle = sc->db_bhandle;
2967       di.db_btag = sc->db_btag;
2968       di.db_page_size = 4096;
2969       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2970         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2971       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2972         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2973       } else {
2974         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2975       }
2976       di.dev_family = OCE_GEN2_FAMILY; /* FIXME: must detect Skyhawk */
2977       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2978         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2979         di.msix.start_vector = sc->intr_count;
2980         for (i = 0; i < di.msix.num_vectors; i++) {
2981           di.msix.vector_list[i] = sc->intrs[i].vector;
2982         }
2983       }
2984       /* in INTx mode there is no MSI-X vector list to report */
2985       memcpy(di.mac_addr, sc->macaddr.mac_addr, ETHER_ADDR_LEN);
2986       di.vendor_id = pci_get_vendor(sc->dev);
2987       di.dev_id = pci_get_device(sc->dev);
2988 
2989       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2990         di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2991       }
2992 
2993       rdma_if->announce(&di);
2994     }
    /* advance unconditionally so a NULL announce hook cannot spin forever */
2995     sc = sc->next;
2996   }
2997 
2998   return 0;
2999 }
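
/*
 * Minimal sketch of an RDMA driver registering through the interface above.
 * It assumes the usual OCE_ / POCE_ struct-and-pointer typedef pairing and
 * a hypothetical my_announce() callback; only the size and announce members
 * actually read by oce_register_rdma() are filled in.
 */
#if 0
	OCE_RDMA_INFO info = { .size = OCE_RDMA_INFO_SIZE };
	OCE_RDMA_IF rif = {
		.size = OCE_RDMA_IF_SIZE,
		.announce = my_announce,	/* hypothetical callback */
	};

	if (oce_register_rdma(&info, &rif) == 0) {
		/* info.mbox_post, info.get_mac_addr, etc. are now valid */
	}
#endif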
3000 
3001 static void
3002 oce_read_env_variables(POCE_SOFTC sc)
3003 {
3004 	char *value = NULL;
3005 	int rc = 0;
3006 
3007 	/* Read whether the user wants hardware or software LRO; the kenv
3008 	 * lookup is currently disabled, so hardware LRO defaults to off. */
3009 	/* value = getenv("oce_enable_hwlro"); */
3010 	if (value && IS_SH(sc)) {
3011 		sc->enable_hwlro = strtol(value, NULL, 10);
3012 		if (sc->enable_hwlro) {
3013 			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
3014 			if (rc) {
3015 				device_printf(sc->dev, "no hardware lro support\n");
3016 				device_printf(sc->dev, "software lro enabled\n");
3017 				sc->enable_hwlro = 0;
3018 			} else {
3019 				device_printf(sc->dev, "hardware lro enabled\n");
3020 				oce_max_rsp_handled = 32;
3021 			}
3022 		} else {
3023 			device_printf(sc->dev, "software lro enabled\n");
3024 		}
3025 	} else {
3026 		sc->enable_hwlro = 0;
3027 	}
3028 
3029 	/* Read the RX mbuf size; this lookup is disabled as well. */
3030 	/* value = getenv("oce_rq_buf_size"); */
3031 	if (value && IS_SH(sc)) {
3032 		oce_rq_buf_size = strtol(value, NULL, 10);
3033 		switch (oce_rq_buf_size) {
3034 		case 2048:
3035 		case 4096:
3036 		case 9216:
3037 		case 16384:
3038 			break;
3039 
3040 		default:
3041 			device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
3042 			oce_rq_buf_size = 2048;
3043 		}
3044 	}
3045 
3046 }
3047