xref: /freebsd/sys/dev/oce/oce_if.c (revision d34048812292b714a0bf99967270d18fe3097c62)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 /* $FreeBSD$ */
42 
43 #include "opt_inet6.h"
44 #include "opt_inet.h"
45 
46 #include "oce_if.h"
47 #include "oce_user.h"
48 
49 #define is_tso_pkt(m) ((m)->m_pkthdr.csum_flags & CSUM_TSO)
50 
51 /* UE Status Low CSR */
52 static char *ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 
87 /* UE Status High CSR */
88 static char *ue_status_hi_desc[] = {
89         "LPCMEMHOST",
90         "MGMT_MAC",
91         "PCS0ONLINE",
92         "MPU_IRAM",
93         "PCS1ONLINE",
94         "PCTL0",
95         "PCTL1",
96         "PMEM",
97         "RR",
98         "TXPB",
99         "RXPP",
100         "XAUI",
101         "TXP",
102         "ARM",
103         "IPC",
104         "HOST2",
105         "HOST3",
106         "HOST4",
107         "HOST5",
108         "HOST6",
109         "HOST7",
110         "HOST8",
111         "HOST9",
112         "NETC",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown"
121 };
122 
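/*
 * Normalized RX completion info: filled in from either the regular NIC
 * RX CQE or the hardware-LRO CQE formats, and consumed by
 * oce_rx_mbuf_chain().
 */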
123 struct oce_common_cqe_info {
124         uint8_t vtp:1;
125         uint8_t l4_cksum_pass:1;
126         uint8_t ip_cksum_pass:1;
127         uint8_t ipv6_frame:1;
128         uint8_t qnq:1;
129         uint8_t rsvd:3;
130         uint8_t num_frags;
131         uint16_t pkt_size;
132         uint16_t vtag;
133 };
134 
135 
136 /* Driver entry points prototypes */
137 static int  oce_probe(device_t dev);
138 static int  oce_attach(device_t dev);
139 static int  oce_detach(device_t dev);
140 static int  oce_shutdown(device_t dev);
141 static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
142 static void oce_init(void *xsc);
143 static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
144 static void oce_multiq_flush(struct ifnet *ifp);
145 
146 /* Driver interrupt routine prototypes */
147 static void oce_intr(void *arg, int pending);
148 static int  oce_setup_intr(POCE_SOFTC sc);
149 static int  oce_fast_isr(void *arg);
150 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
151 			  void (*isr) (void *arg, int pending));
152 
153 /* Media callbacks prototypes */
154 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
155 static int  oce_media_change(struct ifnet *ifp);
156 
157 /* Transmit routines prototypes */
158 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
159 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
160 static void oce_process_tx_completion(struct oce_wq *wq);
161 static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
162 				 struct oce_wq *wq);
163 
164 /* Receive routines prototypes */
165 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
166 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
167 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
168 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static uint16_t oce_rq_handler_lro(void *arg);
170 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
171 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
172 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
173 
174 /* Helper function prototypes in this file */
175 static int  oce_attach_ifp(POCE_SOFTC sc);
176 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
177 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
178 static int  oce_vid_config(POCE_SOFTC sc);
179 static void oce_mac_addr_set(POCE_SOFTC sc);
180 static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
181 static void oce_local_timer(void *arg);
182 static void oce_if_deactivate(POCE_SOFTC sc);
183 static void oce_if_activate(POCE_SOFTC sc);
184 static void setup_max_queues_want(POCE_SOFTC sc);
185 static void update_queues_got(POCE_SOFTC sc);
186 static void process_link_state(POCE_SOFTC sc,
187 		 struct oce_async_cqe_link_state *acqe);
188 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
189 static void oce_get_config(POCE_SOFTC sc);
190 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
191 static void oce_read_env_variables(POCE_SOFTC sc);
192 
193 
194 /* IP specific */
195 #if defined(INET6) || defined(INET)
196 static int  oce_init_lro(POCE_SOFTC sc);
197 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
198 #endif
199 
200 static device_method_t oce_dispatch[] = {
201 	DEVMETHOD(device_probe, oce_probe),
202 	DEVMETHOD(device_attach, oce_attach),
203 	DEVMETHOD(device_detach, oce_detach),
204 	DEVMETHOD(device_shutdown, oce_shutdown),
205 
206 	DEVMETHOD_END
207 };
208 
209 static driver_t oce_driver = {
210 	"oce",
211 	oce_dispatch,
212 	sizeof(OCE_SOFTC)
213 };
214 static devclass_t oce_devclass;
215 
216 
217 /* global vars */
218 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
219 
220 /* Module capabilities and parameters */
221 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
222 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
223 uint32_t oce_rq_buf_size = 2048;
224 
225 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
226 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
227 
228 
229 /* Supported devices table */
230 static uint32_t supportedDevices[] =  {
231 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
232 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
233 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
234 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
235 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
236 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
237 };
238 
239 
240 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
241 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
242     nitems(supportedDevices));
243 MODULE_DEPEND(oce, pci, 1, 1, 1);
244 MODULE_DEPEND(oce, ether, 1, 1, 1);
245 MODULE_VERSION(oce, 1);
246 
247 
248 POCE_SOFTC softc_head = NULL;
249 POCE_SOFTC softc_tail = NULL;
250 
251 struct oce_rdma_if *oce_rdma_if = NULL;
252 
253 /*****************************************************************************
254  *			Driver entry points functions                        *
255  *****************************************************************************/
256 
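/*
 * Match the PCI vendor/device pair against supportedDevices and record
 * the ASIC generation (BE2, BE3, XE201 or SH) in the softc flags.
 */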
257 static int
258 oce_probe(device_t dev)
259 {
260 	uint16_t vendor = 0;
261 	uint16_t device = 0;
262 	int i = 0;
263 	char str[256] = {0};
264 	POCE_SOFTC sc;
265 
266 	sc = device_get_softc(dev);
267 	bzero(sc, sizeof(OCE_SOFTC));
268 	sc->dev = dev;
269 
270 	vendor = pci_get_vendor(dev);
271 	device = pci_get_device(dev);
272 
273 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
274 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
275 			if (device == (supportedDevices[i] & 0xffff)) {
276 				sprintf(str, "%s:%s", "Emulex CNA NIC function",
277 					component_revision);
278 				device_set_desc_copy(dev, str);
279 
280 				switch (device) {
281 				case PCI_PRODUCT_BE2:
282 					sc->flags |= OCE_FLAGS_BE2;
283 					break;
284 				case PCI_PRODUCT_BE3:
285 					sc->flags |= OCE_FLAGS_BE3;
286 					break;
287 				case PCI_PRODUCT_XE201:
288 				case PCI_PRODUCT_XE201_VF:
289 					sc->flags |= OCE_FLAGS_XE201;
290 					break;
291 				case PCI_PRODUCT_SH:
292 					sc->flags |= OCE_FLAGS_SH;
293 					break;
294 				default:
295 					return ENXIO;
296 				}
297 				return BUS_PROBE_DEFAULT;
298 			}
299 		}
300 	}
301 
302 	return ENXIO;
303 }
304 
305 
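/*
 * Bring the adapter up: allocate PCI resources, initialize the
 * hardware, set up interrupts, queues, the ifnet, LRO and sysctls, and
 * finally link the softc onto the global softc list. On failure the
 * labels below unwind the setup steps in reverse order.
 */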
306 static int
307 oce_attach(device_t dev)
308 {
309 	POCE_SOFTC sc;
310 	int rc = 0;
311 
312 	sc = device_get_softc(dev);
313 
314 	rc = oce_hw_pci_alloc(sc);
315 	if (rc)
316 		return rc;
317 
318 	sc->tx_ring_size = OCE_TX_RING_SIZE;
319 	sc->rx_ring_size = OCE_RX_RING_SIZE;
320 	/* receive fragment size should be a multiple of 2K */
321 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
322 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
323 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
324 
325 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
326 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
327 
328 	/* initialise the hardware */
329 	rc = oce_hw_init(sc);
330 	if (rc)
331 		goto pci_res_free;
332 
333 	oce_read_env_variables(sc);
334 
335 	oce_get_config(sc);
336 
337 	setup_max_queues_want(sc);
338 
339 	rc = oce_setup_intr(sc);
340 	if (rc)
341 		goto mbox_free;
342 
343 	rc = oce_queue_init_all(sc);
344 	if (rc)
345 		goto intr_free;
346 
347 	rc = oce_attach_ifp(sc);
348 	if (rc)
349 		goto queues_free;
350 
351 #if defined(INET6) || defined(INET)
352 	rc = oce_init_lro(sc);
353 	if (rc)
354 		goto ifp_free;
355 #endif
356 
357 	rc = oce_hw_start(sc);
358 	if (rc)
359 		goto lro_free;
360 
361 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
362 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
363 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
364 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
365 
366 	rc = oce_stats_init(sc);
367 	if (rc)
368 		goto vlan_free;
369 
370 	oce_add_sysctls(sc);
371 
372 	callout_init(&sc->timer, CALLOUT_MPSAFE);
373 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
374 	if (rc)
375 		goto stats_free;
376 
377 	sc->next = NULL;
378 	if (softc_tail != NULL) {
379 	  softc_tail->next = sc;
380 	} else {
381 	  softc_head = sc;
382 	}
383 	softc_tail = sc;
384 
385 	return 0;
386 
387 stats_free:
388 	callout_drain(&sc->timer);
389 	oce_stats_free(sc);
390 vlan_free:
391 	if (sc->vlan_attach)
392 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
393 	if (sc->vlan_detach)
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
395 	oce_hw_intr_disable(sc);
396 lro_free:
397 #if defined(INET6) || defined(INET)
398 	oce_free_lro(sc);
399 ifp_free:
400 #endif
401 	ether_ifdetach(sc->ifp);
402 	if_free(sc->ifp);
403 queues_free:
404 	oce_queue_release_all(sc);
405 intr_free:
406 	oce_intr_free(sc);
407 mbox_free:
408 	oce_dma_free(sc, &sc->bsmbx);
409 pci_res_free:
410 	oce_hw_pci_free(sc);
411 	LOCK_DESTROY(&sc->dev_lock);
412 	LOCK_DESTROY(&sc->bmbx_lock);
413 	return rc;
414 
415 }
416 
417 
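/*
 * Tear the adapter down and unlink its softc from the global
 * softc_head/softc_tail list.
 */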
418 static int
419 oce_detach(device_t dev)
420 {
421 	POCE_SOFTC sc = device_get_softc(dev);
422 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
423 
424 	poce_sc_tmp = softc_head;
425 	ppoce_sc_tmp1 = &softc_head;
426 	while (poce_sc_tmp != NULL) {
427 		if (poce_sc_tmp == sc) {
428 			*ppoce_sc_tmp1 = sc->next;
429 			if (sc->next == NULL) {
430 				softc_tail = poce_sc_tmp2;
431 			}
432 			break;
433 		}
434 		poce_sc_tmp2 = poce_sc_tmp;
435 		ppoce_sc_tmp1 = &poce_sc_tmp->next;
436 		poce_sc_tmp = poce_sc_tmp->next;
437 	}
438 
439 	LOCK(&sc->dev_lock);
440 	oce_if_deactivate(sc);
441 	UNLOCK(&sc->dev_lock);
442 
443 	callout_drain(&sc->timer);
444 
445 	if (sc->vlan_attach != NULL)
446 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
447 	if (sc->vlan_detach != NULL)
448 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
449 
450 	ether_ifdetach(sc->ifp);
451 
452 	if_free(sc->ifp);
453 
454 	oce_hw_shutdown(sc);
455 
456 	bus_generic_detach(dev);
457 
458 	return 0;
459 }
460 
461 
462 static int
463 oce_shutdown(device_t dev)
464 {
465 	int rc;
466 
467 	rc = oce_detach(dev);
468 
469 	return rc;
470 }
471 
472 
473 static int
474 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
475 {
476 	struct ifreq *ifr = (struct ifreq *)data;
477 	POCE_SOFTC sc = ifp->if_softc;
478 	int rc = 0;
479 	uint32_t u;
480 
481 	switch (command) {
482 
483 	case SIOCGIFMEDIA:
484 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
485 		break;
486 
487 	case SIOCSIFMTU:
488 		if (ifr->ifr_mtu > OCE_MAX_MTU)
489 			rc = EINVAL;
490 		else
491 			ifp->if_mtu = ifr->ifr_mtu;
492 		break;
493 
494 	case SIOCSIFFLAGS:
495 		if (ifp->if_flags & IFF_UP) {
496 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
497 				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
498 				oce_init(sc);
499 			}
500 			device_printf(sc->dev, "Interface Up\n");
501 		} else {
502 			LOCK(&sc->dev_lock);
503 
504 			sc->ifp->if_drv_flags &=
505 			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
506 			oce_if_deactivate(sc);
507 
508 			UNLOCK(&sc->dev_lock);
509 
510 			device_printf(sc->dev, "Interface Down\n");
511 		}
512 
513 		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
514 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
515 				sc->promisc = TRUE;
516 		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
517 			if (!oce_rxf_set_promiscuous(sc, 0))
518 				sc->promisc = FALSE;
519 		}
520 
521 		break;
522 
523 	case SIOCADDMULTI:
524 	case SIOCDELMULTI:
525 		rc = oce_hw_update_multicast(sc);
526 		if (rc)
527 			device_printf(sc->dev,
528 				"Update multicast address failed\n");
529 		break;
530 
531 	case SIOCSIFCAP:
532 		u = ifr->ifr_reqcap ^ ifp->if_capenable;
533 
534 		if (u & IFCAP_TXCSUM) {
535 			ifp->if_capenable ^= IFCAP_TXCSUM;
536 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
537 
538 			if (IFCAP_TSO & ifp->if_capenable &&
539 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
540 				ifp->if_capenable &= ~IFCAP_TSO;
541 				ifp->if_hwassist &= ~CSUM_TSO;
542 				if_printf(ifp,
543 					 "TSO disabled due to -txcsum.\n");
544 			}
545 		}
546 
547 		if (u & IFCAP_RXCSUM)
548 			ifp->if_capenable ^= IFCAP_RXCSUM;
549 
550 		if (u & IFCAP_TSO4) {
551 			ifp->if_capenable ^= IFCAP_TSO4;
552 
553 			if (IFCAP_TSO & ifp->if_capenable) {
554 				if (IFCAP_TXCSUM & ifp->if_capenable)
555 					ifp->if_hwassist |= CSUM_TSO;
556 				else {
557 					ifp->if_capenable &= ~IFCAP_TSO;
558 					ifp->if_hwassist &= ~CSUM_TSO;
559 					if_printf(ifp,
560 					    "Enable txcsum first.\n");
561 					rc = EAGAIN;
562 				}
563 			} else
564 				ifp->if_hwassist &= ~CSUM_TSO;
565 		}
566 
567 		if (u & IFCAP_VLAN_HWTAGGING)
568 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
569 
570 		if (u & IFCAP_VLAN_HWFILTER) {
571 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
572 			oce_vid_config(sc);
573 		}
574 #if defined(INET6) || defined(INET)
575 		if (u & IFCAP_LRO) {
576 			ifp->if_capenable ^= IFCAP_LRO;
577 			if(sc->enable_hwlro) {
578 				if(ifp->if_capenable & IFCAP_LRO) {
579 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
580 				}else {
581 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
582 				}
583 			}
584 		}
585 #endif
586 
587 		break;
588 
589 	case SIOCGPRIVATE_0:
590 		rc = oce_handle_passthrough(ifp, data);
591 		break;
592 	default:
593 		rc = ether_ioctl(ifp, command, data);
594 		break;
595 	}
596 
597 	return rc;
598 }
599 
600 
601 static void
602 oce_init(void *arg)
603 {
604 	POCE_SOFTC sc = arg;
605 
606 	LOCK(&sc->dev_lock);
607 
608 	if (sc->ifp->if_flags & IFF_UP) {
609 		oce_if_deactivate(sc);
610 		oce_if_activate(sc);
611 	}
612 
613 	UNLOCK(&sc->dev_lock);
614 
615 }
616 
617 
618 static int
619 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
620 {
621 	POCE_SOFTC sc = ifp->if_softc;
622 	struct oce_wq *wq = NULL;
623 	int queue_index = 0;
624 	int status = 0;
625 
626 	if (!sc->link_status)
627 		return ENXIO;
628 
629 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
630 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
631 
632 	wq = sc->wq[queue_index];
633 
634 	LOCK(&wq->tx_lock);
635 	status = oce_multiq_transmit(ifp, m, wq);
636 	UNLOCK(&wq->tx_lock);
637 
638 	return status;
639 
640 }
641 
642 
643 static void
644 oce_multiq_flush(struct ifnet *ifp)
645 {
646 	POCE_SOFTC sc = ifp->if_softc;
647 	struct mbuf     *m;
648 	int i = 0;
649 
650 	for (i = 0; i < sc->nwqs; i++) {
651 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
652 			m_freem(m);
653 	}
654 	if_qflush(ifp);
655 }
656 
657 
658 
659 /*****************************************************************************
660  *                   Driver interrupt routines functions                     *
661  *****************************************************************************/
662 
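/*
 * EQ taskqueue handler: drain all pending EQ entries, run the handler
 * of every CQ attached to this EQ, then re-arm the CQs and the EQ.
 */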
663 static void
664 oce_intr(void *arg, int pending)
665 {
666 
667 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
668 	POCE_SOFTC sc = ii->sc;
669 	struct oce_eq *eq = ii->eq;
670 	struct oce_eqe *eqe;
671 	struct oce_cq *cq = NULL;
672 	int i, num_eqes = 0;
673 
674 
675 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
676 				 BUS_DMASYNC_POSTWRITE);
677 	do {
678 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
679 		if (eqe->evnt == 0)
680 			break;
681 		eqe->evnt = 0;
682 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
683 					BUS_DMASYNC_POSTWRITE);
684 		RING_GET(eq->ring, 1);
685 		num_eqes++;
686 
687 	} while (TRUE);
688 
689 	if (!num_eqes)
690 		goto eq_arm; /* Spurious */
691 
692 	/* Clear EQ entries, but don't arm */
693 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
694 
695 	/* Process TX, RX and MCC completions, but don't arm the CQ */
696 	for (i = 0; i < eq->cq_valid; i++) {
697 		cq = eq->cq[i];
698 		(*cq->cq_handler)(cq->cb_arg);
699 	}
700 
701 	/* Arm all cqs connected to this EQ */
702 	for (i = 0; i < eq->cq_valid; i++) {
703 		cq = eq->cq[i];
704 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
705 	}
706 
707 eq_arm:
708 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
709 
710 	return;
711 }
712 
713 
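/*
 * Allocate interrupt vectors: request MSI-X (carving out
 * OCE_RDMA_VECTORS for RoCE when the RDMA interface is supported) and
 * fall back to a single legacy INTx vector if that fails.
 */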
714 static int
715 oce_setup_intr(POCE_SOFTC sc)
716 {
717 	int rc = 0, use_intx = 0;
718 	int vector = 0, req_vectors = 0;
719 	int tot_req_vectors, tot_vectors;
720 
721 	if (is_rss_enabled(sc))
722 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
723 	else
724 		req_vectors = 1;
725 
726 	tot_req_vectors = req_vectors;
727 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
728 		if (req_vectors > 1) {
729 			tot_req_vectors += OCE_RDMA_VECTORS;
730 			sc->roce_intr_count = OCE_RDMA_VECTORS;
731 		}
732 	}
733 
734 	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
735 		sc->intr_count = req_vectors;
736 		tot_vectors = tot_req_vectors;
737 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
738 		if (rc != 0) {
739 			use_intx = 1;
740 			pci_release_msi(sc->dev);
741 		} else {
742 			if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
743 				if (tot_vectors < tot_req_vectors) {
744 					if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
745 						sc->roce_intr_count = (tot_vectors / 2);
746 					}
747 					sc->intr_count = tot_vectors - sc->roce_intr_count;
748 				}
749 			} else {
750 				sc->intr_count = tot_vectors;
751 			}
752 			sc->flags |= OCE_FLAGS_USING_MSIX;
753 		}
754 	} else
755 		use_intx = 1;
756 
757 	if (use_intx)
758 		sc->intr_count = 1;
759 
760 	/* Scale number of queues based on intr we got */
761 	update_queues_got(sc);
762 
763 	if (use_intx) {
764 		device_printf(sc->dev, "Using legacy interrupt\n");
765 		rc = oce_alloc_intr(sc, vector, oce_intr);
766 		if (rc)
767 			goto error;
768 	} else {
769 		for (; vector < sc->intr_count; vector++) {
770 			rc = oce_alloc_intr(sc, vector, oce_intr);
771 			if (rc)
772 				goto error;
773 		}
774 	}
775 
776 	return 0;
777 error:
778 	oce_intr_free(sc);
779 	return rc;
780 }
781 
782 
783 static int
784 oce_fast_isr(void *arg)
785 {
786 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
787 	POCE_SOFTC sc = ii->sc;
788 
789 	if (ii->eq == NULL)
790 		return FILTER_STRAY;
791 
792 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
793 
794 	taskqueue_enqueue(ii->tq, &ii->task);
795 
796  	ii->eq->intr++;
797 
798 	return FILTER_HANDLED;
799 }
800 
801 
802 static int
803 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
804 {
805 	POCE_INTR_INFO ii = &sc->intrs[vector];
806 	int rc = 0, rr;
807 
808 	if (vector >= OCE_MAX_EQ)
809 		return (EINVAL);
810 
811 	/* Set the resource id for the interrupt.
812 	 * MSIx is vector + 1 for the resource id,
813 	 * INTx is 0 for the resource id.
814 	 */
815 	if (sc->flags & OCE_FLAGS_USING_MSIX)
816 		rr = vector + 1;
817 	else
818 		rr = 0;
819 	ii->intr_res = bus_alloc_resource_any(sc->dev,
820 					      SYS_RES_IRQ,
821 					      &rr, RF_ACTIVE|RF_SHAREABLE);
822 	ii->irq_rr = rr;
823 	if (ii->intr_res == NULL) {
824 		device_printf(sc->dev,
825 			  "Could not allocate interrupt\n");
826 		rc = ENXIO;
827 		return rc;
828 	}
829 
830 	TASK_INIT(&ii->task, 0, isr, ii);
831 	ii->vector = vector;
832 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
833 	ii->tq = taskqueue_create_fast(ii->task_name,
834 			M_NOWAIT,
835 			taskqueue_thread_enqueue,
836 			&ii->tq);
837 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
838 			device_get_nameunit(sc->dev));
839 
840 	ii->sc = sc;
841 	rc = bus_setup_intr(sc->dev,
842 			ii->intr_res,
843 			INTR_TYPE_NET,
844 			oce_fast_isr, NULL, ii, &ii->tag);
845 	return rc;
846 
847 }
848 
849 
850 void
851 oce_intr_free(POCE_SOFTC sc)
852 {
853 	int i = 0;
854 
855 	for (i = 0; i < sc->intr_count; i++) {
856 
857 		if (sc->intrs[i].tag != NULL)
858 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
859 						sc->intrs[i].tag);
860 		if (sc->intrs[i].tq != NULL)
861 			taskqueue_free(sc->intrs[i].tq);
862 
863 		if (sc->intrs[i].intr_res != NULL)
864 			bus_release_resource(sc->dev, SYS_RES_IRQ,
865 						sc->intrs[i].irq_rr,
866 						sc->intrs[i].intr_res);
867 		sc->intrs[i].tag = NULL;
868 		sc->intrs[i].intr_res = NULL;
869 	}
870 
871 	if (sc->flags & OCE_FLAGS_USING_MSIX)
872 		pci_release_msi(sc->dev);
873 
874 }
875 
876 
877 
878 /******************************************************************************
879 *			  Media callbacks functions 			      *
880 ******************************************************************************/
881 
882 static void
883 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
884 {
885 	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
886 
887 
888 	req->ifm_status = IFM_AVALID;
889 	req->ifm_active = IFM_ETHER;
890 
891 	if (sc->link_status == 1)
892 		req->ifm_status |= IFM_ACTIVE;
893 	else
894 		return;
895 
896 	switch (sc->link_speed) {
897 	case 1: /* 10 Mbps */
898 		req->ifm_active |= IFM_10_T | IFM_FDX;
899 		sc->speed = 10;
900 		break;
901 	case 2: /* 100 Mbps */
902 		req->ifm_active |= IFM_100_TX | IFM_FDX;
903 		sc->speed = 100;
904 		break;
905 	case 3: /* 1 Gbps */
906 		req->ifm_active |= IFM_1000_T | IFM_FDX;
907 		sc->speed = 1000;
908 		break;
909 	case 4: /* 10 Gbps */
910 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
911 		sc->speed = 10000;
912 		break;
913 	case 5: /* 20 Gbps */
914 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
915 		sc->speed = 20000;
916 		break;
917 	case 6: /* 25 Gbps */
918 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
919 		sc->speed = 25000;
920 		break;
921 	case 7: /* 40 Gbps */
922 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
923 		sc->speed = 40000;
924 		break;
925 	default:
926 		sc->speed = 0;
927 		break;
928 	}
929 
930 	return;
931 }
932 
933 
934 int
935 oce_media_change(struct ifnet *ifp)
936 {
937 	return 0;
938 }
939 
940 
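/*
 * Decide whether a copy of this transmit packet should also go to the
 * BMC (os2bmc). Only selected multicast/broadcast frames qualify (ARP,
 * ICMPv6 RA/NA, DHCP, NetBIOS and DHCPv6 RAS, depending on the
 * configured filters); when one does, *m_new is set to a duplicate of
 * the mbuf with a VLAN tag inserted.
 */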
941 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
942 				struct mbuf *m, boolean_t *os2bmc,
943 				struct mbuf **m_new)
944 {
945 	struct ether_header *eh = NULL;
946 
947 	eh = mtod(m, struct ether_header *);
948 
949 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
950 		*os2bmc = FALSE;
951 		goto done;
952 	}
953 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
954 		goto done;
955 
956 	if (is_mc_allowed_on_bmc(sc, eh) ||
957 	    is_bc_allowed_on_bmc(sc, eh) ||
958 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
959 		*os2bmc = TRUE;
960 		goto done;
961 	}
962 
963 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
964 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
965 		uint8_t nexthdr = ip6->ip6_nxt;
966 		if (nexthdr == IPPROTO_ICMPV6) {
967 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
968 			switch (icmp6->icmp6_type) {
969 			case ND_ROUTER_ADVERT:
970 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
971 				goto done;
972 			case ND_NEIGHBOR_ADVERT:
973 				*os2bmc = is_ipv6_na_filt_enabled(sc);
974 				goto done;
975 			default:
976 				break;
977 			}
978 		}
979 	}
980 
981 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
982 		struct ip *ip = mtod(m, struct ip *);
983 		int iphlen = ip->ip_hl << 2;
984 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
985 		switch (uh->uh_dport) {
986 		case DHCP_CLIENT_PORT:
987 			*os2bmc = is_dhcp_client_filt_enabled(sc);
988 			goto done;
989 		case DHCP_SERVER_PORT:
990 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
991 			goto done;
992 		case NET_BIOS_PORT1:
993 		case NET_BIOS_PORT2:
994 			*os2bmc = is_nbios_filt_enabled(sc);
995 			goto done;
996 		case DHCPV6_RAS_PORT:
997 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
998 			goto done;
999 		default:
1000 			break;
1001 		}
1002 	}
1003 done:
1004 	if (*os2bmc) {
1005 		*m_new = m_dup(m, M_NOWAIT);
1006 		if (!*m_new) {
1007 			*os2bmc = FALSE;
1008 			return;
1009 		}
1010 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1011 	}
1012 }
1013 
1014 
1015 
1016 /*****************************************************************************
1017  *			  Transmit routines functions			     *
1018  *****************************************************************************/
1019 
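/*
 * Main transmit routine: apply the short-packet and TSO workarounds,
 * DMA-map the mbuf, build a NIC_HDR_WQE plus one NIC_FRAG_WQE per DMA
 * segment (adding a dummy WQE on BE/SH to keep the count even) and
 * ring the WQ doorbell. EFBIG from the DMA load triggers a single
 * m_defrag() retry; EBUSY is returned when the ring is too full so the
 * caller can re-queue the mbuf.
 */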
1020 static int
1021 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1022 {
1023 	int rc = 0, i, retry_cnt = 0;
1024 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1025 	struct mbuf *m, *m_temp, *m_new = NULL;
1026 	struct oce_wq *wq = sc->wq[wq_index];
1027 	struct oce_packet_desc *pd;
1028 	struct oce_nic_hdr_wqe *nichdr;
1029 	struct oce_nic_frag_wqe *nicfrag;
1030 	struct ether_header *eh = NULL;
1031 	int num_wqes;
1032 	uint32_t reg_value;
1033 	boolean_t complete = TRUE;
1034 	boolean_t os2bmc = FALSE;
1035 
1036 	m = *mpp;
1037 	if (!m)
1038 		return EINVAL;
1039 
1040 	if (!(m->m_flags & M_PKTHDR)) {
1041 		rc = ENXIO;
1042 		goto free_ret;
1043 	}
1044 
1045 	/* Don't allow non-TSO packets longer than MTU */
1046 	if (!is_tso_pkt(m)) {
1047 		eh = mtod(m, struct ether_header *);
1048 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1049 			 goto free_ret;
1050 	}
1051 
1052 	if(oce_tx_asic_stall_verify(sc, m)) {
1053 		m = oce_insert_vlan_tag(sc, m, &complete);
1054 		if(!m) {
1055 			device_printf(sc->dev, "Insertion unsuccessful\n");
1056 			return 0;
1057 		}
1058 
1059 	}
1060 
1061 	/* The Lancer and SH ASICs have a bug wherein packets of 32 bytes
1062 	 * or less may cause a transmit stall on that port. The work-around
1063 	 * is to pad such short packets out to a 36-byte length.
1064 	 */
1065 	if(IS_SH(sc) || IS_XE201(sc) ) {
1066 		if(m->m_pkthdr.len <= 32) {
1067 			char buf[36];
1068 			bzero((void *)buf, 36);
1069 			m_append(m, (36 - m->m_pkthdr.len), buf);
1070 		}
1071 	}
1072 
1073 tx_start:
1074 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1075 		/* consolidate packet buffers for TSO/LSO segment offload */
1076 #if defined(INET6) || defined(INET)
1077 		m = oce_tso_setup(sc, mpp);
1078 #else
1079 		m = NULL;
1080 #endif
1081 		if (m == NULL) {
1082 			rc = ENXIO;
1083 			goto free_ret;
1084 		}
1085 	}
1086 
1087 
1088 	pd = &wq->pckts[wq->pkt_desc_head];
1089 
1090 retry:
1091 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1092 				     pd->map,
1093 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1094 	if (rc == 0) {
1095 		num_wqes = pd->nsegs + 1;
1096 		if (IS_BE(sc) || IS_SH(sc)) {
1097 			/* Dummy WQE needed to keep the WQE count even on BE/SH. */
1098 			if (num_wqes & 1)
1099 				num_wqes++;
1100 		}
1101 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1102 			bus_dmamap_unload(wq->tag, pd->map);
1103 			return EBUSY;
1104 		}
1105 		atomic_store_rel_int(&wq->pkt_desc_head,
1106 				     (wq->pkt_desc_head + 1) % \
1107 				      OCE_WQ_PACKET_ARRAY_SIZE);
1108 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1109 		pd->mbuf = m;
1110 
1111 		nichdr =
1112 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1113 		nichdr->u0.dw[0] = 0;
1114 		nichdr->u0.dw[1] = 0;
1115 		nichdr->u0.dw[2] = 0;
1116 		nichdr->u0.dw[3] = 0;
1117 
1118 		nichdr->u0.s.complete = complete;
1119 		nichdr->u0.s.mgmt = os2bmc;
1120 		nichdr->u0.s.event = 1;
1121 		nichdr->u0.s.crc = 1;
1122 		nichdr->u0.s.forward = 0;
1123 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1124 		nichdr->u0.s.udpcs =
1125 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1126 		nichdr->u0.s.tcpcs =
1127 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1128 		nichdr->u0.s.num_wqe = num_wqes;
1129 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1130 
1131 		if (m->m_flags & M_VLANTAG) {
1132 			nichdr->u0.s.vlan = 1; /* VLAN present */
1133 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1134 		}
1135 
1136 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1137 			if (m->m_pkthdr.tso_segsz) {
1138 				nichdr->u0.s.lso = 1;
1139 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1140 			}
1141 			if (!IS_BE(sc) || !IS_SH(sc))
1142 				nichdr->u0.s.ipcs = 1;
1143 		}
1144 
1145 		RING_PUT(wq->ring, 1);
1146 		atomic_add_int(&wq->ring->num_used, 1);
1147 
1148 		for (i = 0; i < pd->nsegs; i++) {
1149 			nicfrag =
1150 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1151 						      struct oce_nic_frag_wqe);
1152 			nicfrag->u0.s.rsvd0 = 0;
1153 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1154 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1155 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1156 			pd->wqe_idx = wq->ring->pidx;
1157 			RING_PUT(wq->ring, 1);
1158 			atomic_add_int(&wq->ring->num_used, 1);
1159 		}
1160 		if (num_wqes > (pd->nsegs + 1)) {
1161 			nicfrag =
1162 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1163 						      struct oce_nic_frag_wqe);
1164 			nicfrag->u0.dw[0] = 0;
1165 			nicfrag->u0.dw[1] = 0;
1166 			nicfrag->u0.dw[2] = 0;
1167 			nicfrag->u0.dw[3] = 0;
1168 			pd->wqe_idx = wq->ring->pidx;
1169 			RING_PUT(wq->ring, 1);
1170 			atomic_add_int(&wq->ring->num_used, 1);
1171 			pd->nsegs++;
1172 		}
1173 
1174 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1175 		wq->tx_stats.tx_reqs++;
1176 		wq->tx_stats.tx_wrbs += num_wqes;
1177 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1178 		wq->tx_stats.tx_pkts++;
1179 
1180 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1181 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1182 		reg_value = (num_wqes << 16) | wq->wq_id;
1183 
1184 		/* If os2bmc is not enabled, or the packet is already tagged
1185 		   as destined for the BMC, oce_is_pkt_dest_bmc() does nothing.
1186 		 */
1187 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1188 
1189 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1190 
1191 	} else if (rc == EFBIG)	{
1192 		if (retry_cnt == 0) {
1193 			m_temp = m_defrag(m, M_NOWAIT);
1194 			if (m_temp == NULL)
1195 				goto free_ret;
1196 			m = m_temp;
1197 			*mpp = m_temp;
1198 			retry_cnt = retry_cnt + 1;
1199 			goto retry;
1200 		} else
1201 			goto free_ret;
1202 	} else if (rc == ENOMEM)
1203 		return rc;
1204 	else
1205 		goto free_ret;
1206 
1207 	if (os2bmc) {
1208 		m = m_new;
1209 		goto tx_start;
1210 	}
1211 
1212 	return 0;
1213 
1214 free_ret:
1215 	m_freem(*mpp);
1216 	*mpp = NULL;
1217 	return rc;
1218 }
1219 
1220 
1221 static void
1222 oce_process_tx_completion(struct oce_wq *wq)
1223 {
1224 	struct oce_packet_desc *pd;
1225 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1226 	struct mbuf *m;
1227 
1228 	pd = &wq->pckts[wq->pkt_desc_tail];
1229 	atomic_store_rel_int(&wq->pkt_desc_tail,
1230 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1231 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1232 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1233 	bus_dmamap_unload(wq->tag, pd->map);
1234 
1235 	m = pd->mbuf;
1236 	m_freem(m);
1237 	pd->mbuf = NULL;
1238 
1239 
1240 	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1241 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1242 			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1243 			oce_tx_restart(sc, wq);
1244 		}
1245 	}
1246 }
1247 
1248 
1249 static void
1250 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1251 {
1252 
1253 	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1254 		return;
1255 
1256 #if __FreeBSD_version >= 800000
1257 	if (!drbr_empty(sc->ifp, wq->br))
1258 #else
1259 	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1260 #endif
1261 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1262 
1263 }
1264 
1265 
1266 #if defined(INET6) || defined(INET)
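/*
 * For a TSO packet, make sure the mbuf is writable and m_pullup() the
 * Ethernet/IP/TCP headers into the first mbuf; returns NULL if the
 * packet is not TCP over IPv4/IPv6.
 */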
1267 static struct mbuf *
1268 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1269 {
1270 	struct mbuf *m;
1271 #ifdef INET
1272 	struct ip *ip;
1273 #endif
1274 #ifdef INET6
1275 	struct ip6_hdr *ip6;
1276 #endif
1277 	struct ether_vlan_header *eh;
1278 	struct tcphdr *th;
1279 	uint16_t etype;
1280 	int total_len = 0, ehdrlen = 0;
1281 
1282 	m = *mpp;
1283 
1284 	if (M_WRITABLE(m) == 0) {
1285 		m = m_dup(*mpp, M_NOWAIT);
1286 		if (!m)
1287 			return NULL;
1288 		m_freem(*mpp);
1289 		*mpp = m;
1290 	}
1291 
1292 	eh = mtod(m, struct ether_vlan_header *);
1293 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1294 		etype = ntohs(eh->evl_proto);
1295 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1296 	} else {
1297 		etype = ntohs(eh->evl_encap_proto);
1298 		ehdrlen = ETHER_HDR_LEN;
1299 	}
1300 
1301 	switch (etype) {
1302 #ifdef INET
1303 	case ETHERTYPE_IP:
1304 		ip = (struct ip *)(m->m_data + ehdrlen);
1305 		if (ip->ip_p != IPPROTO_TCP)
1306 			return NULL;
1307 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1308 
1309 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1310 		break;
1311 #endif
1312 #ifdef INET6
1313 	case ETHERTYPE_IPV6:
1314 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1315 		if (ip6->ip6_nxt != IPPROTO_TCP)
1316 			return NULL;
1317 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1318 
1319 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1320 		break;
1321 #endif
1322 	default:
1323 		return NULL;
1324 	}
1325 
1326 	m = m_pullup(m, total_len);
1327 	if (!m)
1328 		return NULL;
1329 	*mpp = m;
1330 	return m;
1331 
1332 }
1333 #endif /* INET6 || INET */
1334 
1335 void
1336 oce_tx_task(void *arg, int npending)
1337 {
1338 	struct oce_wq *wq = arg;
1339 	POCE_SOFTC sc = wq->parent;
1340 	struct ifnet *ifp = sc->ifp;
1341 	int rc = 0;
1342 
1343 #if __FreeBSD_version >= 800000
1344 	LOCK(&wq->tx_lock);
1345 	rc = oce_multiq_transmit(ifp, NULL, wq);
1346 	if (rc) {
1347 		device_printf(sc->dev,
1348 				"TX[%d] restart failed\n", wq->queue_index);
1349 	}
1350 	UNLOCK(&wq->tx_lock);
1351 #else
1352 	oce_start(ifp);
1353 #endif
1354 
1355 }
1356 
1357 
1358 void
1359 oce_start(struct ifnet *ifp)
1360 {
1361 	POCE_SOFTC sc = ifp->if_softc;
1362 	struct mbuf *m;
1363 	int rc = 0;
1364 	int def_q = 0; /* Default TX queue is 0 */
1365 
1366 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1367 			IFF_DRV_RUNNING)
1368 		return;
1369 
1370 	if (!sc->link_status)
1371 		return;
1372 
1373 	do {
1374 		IF_DEQUEUE(&sc->ifp->if_snd, m);
1375 		if (m == NULL)
1376 			break;
1377 
1378 		LOCK(&sc->wq[def_q]->tx_lock);
1379 		rc = oce_tx(sc, &m, def_q);
1380 		UNLOCK(&sc->wq[def_q]->tx_lock);
1381 		if (rc) {
1382 			if (m != NULL) {
1383 				sc->wq[def_q]->tx_stats.tx_stops ++;
1384 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1385 				IFQ_DRV_PREPEND(&ifp->if_snd, m);
1386 				m = NULL;
1387 			}
1388 			break;
1389 		}
1390 		if (m != NULL)
1391 			ETHER_BPF_MTAP(ifp, m);
1392 
1393 	} while (TRUE);
1394 
1395 	return;
1396 }
1397 
1398 
1399 /* Handle the Completion Queue for transmit */
1400 uint16_t
1401 oce_wq_handler(void *arg)
1402 {
1403 	struct oce_wq *wq = (struct oce_wq *)arg;
1404 	POCE_SOFTC sc = wq->parent;
1405 	struct oce_cq *cq = wq->cq;
1406 	struct oce_nic_tx_cqe *cqe;
1407 	int num_cqes = 0;
1408 
1409 	LOCK(&wq->tx_compl_lock);
1410 	bus_dmamap_sync(cq->ring->dma.tag,
1411 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1412 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1413 	while (cqe->u0.dw[3]) {
1414 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1415 
1416 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1417 		if (wq->ring->cidx >= wq->ring->num_items)
1418 			wq->ring->cidx -= wq->ring->num_items;
1419 
1420 		oce_process_tx_completion(wq);
1421 		wq->tx_stats.tx_compl++;
1422 		cqe->u0.dw[3] = 0;
1423 		RING_GET(cq->ring, 1);
1424 		bus_dmamap_sync(cq->ring->dma.tag,
1425 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1426 		cqe =
1427 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1428 		num_cqes++;
1429 	}
1430 
1431 	if (num_cqes)
1432 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1433 
1434 	UNLOCK(&wq->tx_compl_lock);
1435 	return num_cqes;
1436 }
1437 
1438 
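/*
 * Enqueue the mbuf (if any) on the WQ's buf_ring, then drain the ring
 * through oce_tx() until it is empty or the hardware WQ fills up.
 */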
1439 static int
1440 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1441 {
1442 	POCE_SOFTC sc = ifp->if_softc;
1443 	int status = 0, queue_index = 0;
1444 	struct mbuf *next = NULL;
1445 	struct buf_ring *br = NULL;
1446 
1447 	br  = wq->br;
1448 	queue_index = wq->queue_index;
1449 
1450 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1451 		IFF_DRV_RUNNING) {
1452 		if (m != NULL)
1453 			status = drbr_enqueue(ifp, br, m);
1454 		return status;
1455 	}
1456 
1457 	if (m != NULL) {
1458 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1459 			return status;
1460 	}
1461 	while ((next = drbr_peek(ifp, br)) != NULL) {
1462 		if (oce_tx(sc, &next, queue_index)) {
1463 			if (next == NULL) {
1464 				drbr_advance(ifp, br);
1465 			} else {
1466 				drbr_putback(ifp, br, next);
1467 				wq->tx_stats.tx_stops ++;
1468 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1469 			}
1470 			break;
1471 		}
1472 		drbr_advance(ifp, br);
1473 		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1474 		if (next->m_flags & M_MCAST)
1475 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1476 		ETHER_BPF_MTAP(ifp, next);
1477 	}
1478 
1479 	return 0;
1480 }
1481 
1482 
1483 
1484 
1485 /*****************************************************************************
1486  *			    Receive  routines functions 		     *
1487  *****************************************************************************/
1488 
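/*
 * Rewrite the IP and TCP headers of a hardware-LRO coalesced frame so
 * that they describe the merged packet: length and TTL/hop limit from
 * the completion, plus the ACK number, window, PUSH flag and (when
 * present) timestamp option values.
 */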
1489 static void
1490 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1491 {
1492 	uint32_t *p;
1493         struct ether_header *eh = NULL;
1494         struct tcphdr *tcp_hdr = NULL;
1495         struct ip *ip4_hdr = NULL;
1496         struct ip6_hdr *ip6 = NULL;
1497         uint32_t payload_len = 0;
1498 
1499         eh = mtod(m, struct ether_header *);
1500         /* correct IP header */
1501         if(!cqe2->ipv6_frame) {
1502 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1503                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1504                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1505                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1506         }else {
1507         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1508                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1509                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1510                                                 - sizeof(struct ip6_hdr);
1511                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1512                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1513         }
1514 
1515         /* correct tcp header */
1516         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1517         if(cqe2->push) {
1518         	tcp_hdr->th_flags |= TH_PUSH;
1519         }
1520         tcp_hdr->th_win = htons(cqe2->tcp_window);
1521         tcp_hdr->th_sum = 0xffff;
1522         if(cqe2->ts_opt) {
1523                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1524                 *p = cqe1->tcp_timestamp_val;
1525                 *(p+1) = cqe1->tcp_timestamp_ecr;
1526         }
1527 
1528 	return;
1529 }
1530 
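/*
 * Pull cqe_info->num_frags receive fragments off the RQ and link them
 * into a single mbuf chain, filling in the packet header length and
 * checksum flags on the lead mbuf.
 */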
1531 static void
1532 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1533 {
1534 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1535         uint32_t i = 0, frag_len = 0;
1536 	uint32_t len = cqe_info->pkt_size;
1537         struct oce_packet_desc *pd;
1538         struct mbuf *tail = NULL;
1539 
1540         for (i = 0; i < cqe_info->num_frags; i++) {
1541                 if (rq->ring->cidx == rq->ring->pidx) {
1542                         device_printf(sc->dev,
1543                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1544                         return;
1545                 }
1546                 pd = &rq->pckts[rq->ring->cidx];
1547 
1548                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1549                 bus_dmamap_unload(rq->tag, pd->map);
1550 		RING_GET(rq->ring, 1);
1551                 rq->pending--;
1552 
1553                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1554                 pd->mbuf->m_len = frag_len;
1555 
1556                 if (tail != NULL) {
1557                         /* additional fragments */
1558                         pd->mbuf->m_flags &= ~M_PKTHDR;
1559                         tail->m_next = pd->mbuf;
1560 			if(rq->islro)
1561                         	tail->m_nextpkt = NULL;
1562                         tail = pd->mbuf;
1563                 } else {
1564                         /* first fragment, fill out much of the packet header */
1565                         pd->mbuf->m_pkthdr.len = len;
1566 			if(rq->islro)
1567                         	pd->mbuf->m_nextpkt = NULL;
1568                         pd->mbuf->m_pkthdr.csum_flags = 0;
1569                         if (IF_CSUM_ENABLED(sc)) {
1570                                 if (cqe_info->l4_cksum_pass) {
1571                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1572                                                 pd->mbuf->m_pkthdr.csum_flags |=
1573                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1574                                         }else { /* IPV6 frame */
1575 						if(rq->islro) {
1576                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1577                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1578 						}
1579                                         }
1580                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1581                                 }
1582                                 if (cqe_info->ip_cksum_pass) {
1583                                         pd->mbuf->m_pkthdr.csum_flags |=
1584                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1585                                 }
1586                         }
1587                         *m = tail = pd->mbuf;
1588                }
1589                 pd->mbuf = NULL;
1590                 len -= frag_len;
1591         }
1592 
1593         return;
1594 }
1595 
1596 static void
1597 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1598 {
1599         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1600         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1601         struct mbuf *m = NULL;
1602 	struct oce_common_cqe_info cq_info;
1603 
1604 	/* parse cqe */
1605         if(cqe2 == NULL) {
1606                 cq_info.pkt_size =  cqe->pkt_size;
1607                 cq_info.vtag = cqe->vlan_tag;
1608                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1609                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1610                 cq_info.ipv6_frame = cqe->ipv6_frame;
1611                 cq_info.vtp = cqe->vtp;
1612                 cq_info.qnq = cqe->qnq;
1613         }else {
1614                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1615                 cq_info.pkt_size =  cqe2->coalesced_size;
1616                 cq_info.vtag = cqe2->vlan_tag;
1617                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1618                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1619                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1620                 cq_info.vtp = cqe2->vtp;
1621                 cq_info.qnq = cqe1->qnq;
1622         }
1623 
1624 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1625 
1626         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1627         if(cq_info.pkt_size % rq->cfg.frag_size)
1628                 cq_info.num_frags++;
1629 
1630 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1631 
1632 	if (m) {
1633 		if(cqe2) {
1634 			//assert(cqe2->valid != 0);
1635 
1636 			//assert(cqe2->cqe_type != 2);
1637 			oce_correct_header(m, cqe1, cqe2);
1638 		}
1639 
1640 		m->m_pkthdr.rcvif = sc->ifp;
1641 #if __FreeBSD_version >= 800000
1642 		if (rq->queue_index)
1643 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1644 		else
1645 			m->m_pkthdr.flowid = rq->queue_index;
1646 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1647 #endif
1648 		/* This determines if the VLAN tag is valid */
1649 		if (cq_info.vtp) {
1650 			if (sc->function_mode & FNM_FLEX10_MODE) {
1651 				/* FLEX10. If QnQ is not set, neglect VLAN */
1652 				if (cq_info.qnq) {
1653 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1654 					m->m_flags |= M_VLANTAG;
1655 				}
1656 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1657 				/* In UMC mode the PVID is generally stripped
1658 				   by hw, but in some cases frames still arrive
1659 				   with it. So if pvid == vlan, ignore the tag.
1660 				 */
1661 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1662 				m->m_flags |= M_VLANTAG;
1663 			}
1664 		}
1665 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1666 
1667 		(*sc->ifp->if_input) (sc->ifp, m);
1668 
1669 		/* Update rx stats per queue */
1670 		rq->rx_stats.rx_pkts++;
1671 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1672 		rq->rx_stats.rx_frags += cq_info.num_frags;
1673 		rq->rx_stats.rx_ucast_pkts++;
1674 	}
1675         return;
1676 }
1677 
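/*
 * Process one regular (non-LRO) RX completion: build the mbuf chain,
 * attach the VLAN tag when it is valid, then hand the packet to
 * software LRO if eligible, or directly to the stack.
 */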
1678 static void
1679 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1680 {
1681 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1682 	int len;
1683 	struct mbuf *m = NULL;
1684 	struct oce_common_cqe_info cq_info;
1685 	uint16_t vtag = 0;
1686 
1687 	/* Is it a flush completion with no data? */
1688 	if(!cqe->u0.s.num_fragments)
1689 		goto exit;
1690 
1691 	len = cqe->u0.s.pkt_size;
1692 	if (!len) {
1693 		/* partial DMA workaround for Lancer */
1694 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1695 		goto exit;
1696 	}
1697 
1698 	if (!oce_cqe_portid_valid(sc, cqe)) {
1699 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1700 		goto exit;
1701 	}
1702 
1703 	 /* Get vlan_tag value */
1704 	if(IS_BE(sc) || IS_SH(sc))
1705 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1706 	else
1707 		vtag = cqe->u0.s.vlan_tag;
1708 
1709 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1710 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1711 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1712 	cq_info.num_frags = cqe->u0.s.num_fragments;
1713 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1714 
1715 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1716 
1717 	if (m) {
1718 		m->m_pkthdr.rcvif = sc->ifp;
1719 #if __FreeBSD_version >= 800000
1720 		if (rq->queue_index)
1721 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1722 		else
1723 			m->m_pkthdr.flowid = rq->queue_index;
1724 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1725 #endif
1726 		/* This determines if the VLAN tag is valid */
1727 		if (oce_cqe_vtp_valid(sc, cqe)) {
1728 			if (sc->function_mode & FNM_FLEX10_MODE) {
1729 				/* FLEX10. If QnQ is not set, neglect VLAN */
1730 				if (cqe->u0.s.qnq) {
1731 					m->m_pkthdr.ether_vtag = vtag;
1732 					m->m_flags |= M_VLANTAG;
1733 				}
1734 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1735 				/* In UMC mode the PVID is generally stripped
1736 				   by hw, but in some cases frames still arrive
1737 				   with it. So if pvid == vlan, ignore the tag.
1738 				*/
1739 				m->m_pkthdr.ether_vtag = vtag;
1740 				m->m_flags |= M_VLANTAG;
1741 			}
1742 		}
1743 
1744 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1745 #if defined(INET6) || defined(INET)
1746 		/* Try to queue to LRO */
1747 		if (IF_LRO_ENABLED(sc) &&
1748 		    (cqe->u0.s.ip_cksum_pass) &&
1749 		    (cqe->u0.s.l4_cksum_pass) &&
1750 		    (!cqe->u0.s.ip_ver)       &&
1751 		    (rq->lro.lro_cnt != 0)) {
1752 
1753 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1754 				rq->lro_pkts_queued ++;
1755 				goto post_done;
1756 			}
1757 			/* If LRO posting fails then try to post to STACK */
1758 		}
1759 #endif
1760 
1761 		(*sc->ifp->if_input) (sc->ifp, m);
1762 #if defined(INET6) || defined(INET)
1763 post_done:
1764 #endif
1765 		/* Update rx stats per queue */
1766 		rq->rx_stats.rx_pkts++;
1767 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1768 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1769 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1770 			rq->rx_stats.rx_mcast_pkts++;
1771 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1772 			rq->rx_stats.rx_ucast_pkts++;
1773 	}
1774 exit:
1775 	return;
1776 }
1777 
1778 
1779 void
1780 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1781 {
1782 	uint32_t i = 0;
1783 	struct oce_packet_desc *pd;
1784 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1785 
1786 	for (i = 0; i < num_frags; i++) {
1787                 if (rq->ring->cidx == rq->ring->pidx) {
1788                         device_printf(sc->dev,
1789                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1790                         return;
1791                 }
1792                 pd = &rq->pckts[rq->ring->cidx];
1793                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1794                 bus_dmamap_unload(rq->tag, pd->map);
1795                 if (pd->mbuf != NULL) {
1796                         m_freem(pd->mbuf);
1797                         pd->mbuf = NULL;
1798                 }
1799 
1800 		RING_GET(rq->ring, 1);
1801                 rq->pending--;
1802 	}
1803 }
1804 
1805 
1806 static int
1807 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1808 {
1809 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1810 	int vtp = 0;
1811 
1812 	if (sc->be3_native) {
1813 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1814 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1815 	} else
1816 		vtp = cqe->u0.s.vlan_tag_present;
1817 
1818 	return vtp;
1819 
1820 }
1821 
1822 
1823 static int
1824 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1825 {
1826 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1827 	int port_id = 0;
1828 
1829 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1830 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1831 		port_id =  cqe_v1->u0.s.port;
1832 		if (sc->port_id != port_id)
1833 			return 0;
1834 	} else
1835 		;/* For BE3 legacy and Lancer this is a no-op */
1836 
1837 	return 1;
1838 
1839 }
1840 
1841 #if defined(INET6) || defined(INET)
1842 void
1843 oce_rx_flush_lro(struct oce_rq *rq)
1844 {
1845 	struct lro_ctrl	*lro = &rq->lro;
1846 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1847 
1848 	if (!IF_LRO_ENABLED(sc))
1849 		return;
1850 
1851 	tcp_lro_flush_all(lro);
1852 	rq->lro_pkts_queued = 0;
1853 
1854 	return;
1855 }
1856 
1857 
1858 static int
1859 oce_init_lro(POCE_SOFTC sc)
1860 {
1861 	struct lro_ctrl *lro = NULL;
1862 	int i = 0, rc = 0;
1863 
1864 	for (i = 0; i < sc->nrqs; i++) {
1865 		lro = &sc->rq[i]->lro;
1866 		rc = tcp_lro_init(lro);
1867 		if (rc != 0) {
1868 			device_printf(sc->dev, "LRO init failed\n");
1869 			return rc;
1870 		}
1871 		lro->ifp = sc->ifp;
1872 	}
1873 
1874 	return rc;
1875 }
1876 
1877 
1878 void
1879 oce_free_lro(POCE_SOFTC sc)
1880 {
1881 	struct lro_ctrl *lro = NULL;
1882 	int i = 0;
1883 
1884 	for (i = 0; i < sc->nrqs; i++) {
1885 		lro = &sc->rq[i]->lro;
1886 		if (lro)
1887 			tcp_lro_free(lro);
1888 	}
1889 }
1890 #endif
1891 
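/*
 * Post up to "count" fresh receive buffers to the RQ, ringing the
 * doorbell in batches of at most oce_max_rq_posts buffers.
 */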
1892 int
1893 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1894 {
1895 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1896 	int i, in, rc;
1897 	struct oce_packet_desc *pd;
1898 	bus_dma_segment_t segs[6];
1899 	int nsegs, added = 0;
1900 	struct oce_nic_rqe *rqe;
1901 	pd_rxulp_db_t rxdb_reg;
1902 	uint32_t val = 0;
1903 	uint32_t oce_max_rq_posts = 64;
1904 
1905 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1906 	for (i = 0; i < count; i++) {
1907 		in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
1908 
1909 		pd = &rq->pckts[rq->ring->pidx];
1910 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1911 		if (pd->mbuf == NULL) {
1912 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1913 			break;
1914 		}
1915 		pd->mbuf->m_nextpkt = NULL;
1916 
1917 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1918 
1919 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1920 					     pd->map,
1921 					     pd->mbuf,
1922 					     segs, &nsegs, BUS_DMA_NOWAIT);
1923 		if (rc) {
1924 			m_free(pd->mbuf);
1925 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1926 			break;
1927 		}
1928 
1929 		if (nsegs != 1) {
1930 			i--;
1931 			continue;
1932 		}
1933 
1934 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1935 
1936 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1937 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1938 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1939 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1940 		RING_PUT(rq->ring, 1);
1941 		added++;
1942 		rq->pending++;
1943 	}
1944 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
	if (added != 0) {
		for (i = added / oce_max_rq_posts; i > 0; i--) {
			rxdb_reg.bits.num_posted = oce_max_rq_posts;
			rxdb_reg.bits.qid = rq->rq_id;
			if (rq->islro) {
				/* Build the doorbell value from scratch each
				 * time; OR-ing into the previous value would
				 * corrupt the posted count. */
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= oce_max_rq_posts << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
			added -= oce_max_rq_posts;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			if (rq->islro) {
				val = rq->rq_id & DB_LRO_RQ_ID_MASK;
				val |= added << 16;
				OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
			} else {
				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			}
		}
	}
1970 
1971 	return 0;
1972 }
1973 
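/* Re-arm the RQ's completion queue and replenish receive buffers. */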
1974 static void
1975 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1976 {
	if (num_cqes) {
		oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
		if (!sc->enable_hwlro) {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
		} else {
			if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
				oce_alloc_rx_bufs(rq, 64);
		}
	}
}
1990 
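/*
 * Handle receive completions on a hardware-LRO queue.  The hardware
 * delivers either a self-contained singleton CQE or a part1/part2 CQE
 * pair that must be matched up before the frame is handed to oce_rx_lro().
 */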
1991 uint16_t
1992 oce_rq_handler_lro(void *arg)
1993 {
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int num_cqes = 0;

	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
	    BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	while (cqe->valid) {
		if (cqe->cqe_type == 0) { /* singleton cqe */
			/* We should not get a singleton cqe after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got singleton cqe after cqe1\n");
				goto exit_rq_handler_lro;
			}
			if (cqe->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			oce_rx_lro(rq, cqe, NULL);
			rq->rx_stats.rx_compl++;
			cqe->valid = 0;
			RING_GET(cq->ring, 1);
			num_cqes++;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		} else if (cqe->cqe_type == 0x1) { /* first part */
			/* We should not get cqe1 after cqe1 on the same rq */
			if (rq->cqe_firstpart != NULL) {
				device_printf(sc->dev, "Got cqe1 after cqe1\n");
				goto exit_rq_handler_lro;
			}
			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
			RING_GET(cq->ring, 1);
		} else if (cqe->cqe_type == 0x2) { /* second part */
			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
			if (cqe2->error != 0) {
				rq->rx_stats.rxcp_err++;
				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			}
			/* We should not get cqe2 without cqe1 */
			if (rq->cqe_firstpart == NULL) {
				device_printf(sc->dev, "Got cqe2 without cqe1\n");
				goto exit_rq_handler_lro;
			}
			oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);

			rq->rx_stats.rx_compl++;
			rq->cqe_firstpart->valid = 0;
			cqe2->valid = 0;
			rq->cqe_firstpart = NULL;

			RING_GET(cq->ring, 1);
			num_cqes += 2;
			if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
				break;
		}

		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
	}
2057 	oce_check_rx_bufs(sc, num_cqes, rq);
2058 exit_rq_handler_lro:
2059 	UNLOCK(&rq->rx_lock);
2060 	return 0;
2061 }
2062 
2063 /* Handle the Completion Queue for receive */
2064 uint16_t
2065 oce_rq_handler(void *arg)
2066 {
2067 	struct oce_rq *rq = (struct oce_rq *)arg;
2068 	struct oce_cq *cq = rq->cq;
2069 	POCE_SOFTC sc = rq->parent;
2070 	struct oce_nic_rx_cqe *cqe;
2071 	int num_cqes = 0;
2072 
2073 	if(rq->islro) {
2074 		oce_rq_handler_lro(arg);
2075 		return 0;
2076 	}
2077 	LOCK(&rq->rx_lock);
2078 	bus_dmamap_sync(cq->ring->dma.tag,
2079 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2080 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2081 	while (cqe->u0.dw[2]) {
2082 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2083 
2084 		if (cqe->u0.s.error == 0) {
2085 			oce_rx(rq, cqe);
2086 		} else {
2087 			rq->rx_stats.rxcp_err++;
2088 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2089 			/* Post L3/L4 errors to stack.*/
2090 			oce_rx(rq, cqe);
2091 		}
2092 		rq->rx_stats.rx_compl++;
2093 		cqe->u0.dw[2] = 0;
2094 
2095 #if defined(INET6) || defined(INET)
2096 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2097 			oce_rx_flush_lro(rq);
2098 		}
2099 #endif
2100 
2101 		RING_GET(cq->ring, 1);
2102 		bus_dmamap_sync(cq->ring->dma.tag,
2103 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2104 		cqe =
2105 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2106 		num_cqes++;
2107 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2108 			break;
2109 	}
2110 
2111 #if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	oce_check_rx_bufs(sc, num_cqes, rq);
	UNLOCK(&rq->rx_lock);
	return 0;
}
2121 
2125 /*****************************************************************************
 *			Helper functions in this file			     *
2127  *****************************************************************************/
2128 
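/* Allocate the ifnet, set up its capabilities and attach it to the
 * ethernet layer. */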
2129 static int
2130 oce_attach_ifp(POCE_SOFTC sc)
2131 {
2132 
2133 	sc->ifp = if_alloc(IFT_ETHER);
2134 	if (!sc->ifp)
2135 		return ENOMEM;
2136 
2137 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2138 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2139 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2140 
2141 	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
2142 	sc->ifp->if_ioctl = oce_ioctl;
2143 	sc->ifp->if_start = oce_start;
2144 	sc->ifp->if_init = oce_init;
2145 	sc->ifp->if_mtu = ETHERMTU;
2146 	sc->ifp->if_softc = sc;
2147 #if __FreeBSD_version >= 800000
2148 	sc->ifp->if_transmit = oce_multiq_start;
2149 	sc->ifp->if_qflush = oce_multiq_flush;
2150 #endif
2151 
2152 	if_initname(sc->ifp,
2153 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2154 
2155 	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
2156 	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
2157 	IFQ_SET_READY(&sc->ifp->if_snd);
2158 
2159 	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
2160 	sc->ifp->if_hwassist |= CSUM_TSO;
2161 	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
2162 
2163 	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
2164 	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
2165 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2166 
2167 #if defined(INET6) || defined(INET)
2168 	sc->ifp->if_capabilities |= IFCAP_TSO;
2169 	sc->ifp->if_capabilities |= IFCAP_LRO;
2170 	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2171 #endif
2172 
2173 	sc->ifp->if_capenable = sc->ifp->if_capabilities;
2174 	sc->ifp->if_baudrate = IF_Gbps(10);
2175 
2176 #if __FreeBSD_version >= 1000000
2177 	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2178 	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
2179 	sc->ifp->if_hw_tsomaxsegsize = 4096;
2180 #endif
2181 
2182 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2183 
2184 	return 0;
2185 }
2186 
2187 
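/* VLAN registration callback: record the new tag and update the HW filter. */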
2188 static void
2189 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2190 {
2191 	POCE_SOFTC sc = ifp->if_softc;
2192 
2193 	if (ifp->if_softc !=  arg)
2194 		return;
2195 	if ((vtag == 0) || (vtag > 4095))
2196 		return;
2197 
2198 	sc->vlan_tag[vtag] = 1;
2199 	sc->vlans_added++;
2200 	if (sc->vlans_added <= (sc->max_vlans + 1))
2201 		oce_vid_config(sc);
2202 }
2203 
2204 
2205 static void
2206 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
2207 {
2208 	POCE_SOFTC sc = ifp->if_softc;
2209 
2210 	if (ifp->if_softc !=  arg)
2211 		return;
2212 	if ((vtag == 0) || (vtag > 4095))
2213 		return;
2214 
2215 	sc->vlan_tag[vtag] = 0;
2216 	sc->vlans_added--;
2217 	oce_vid_config(sc);
2218 }
2219 
2220 
2221 /*
2222  * A max of 64 vlans can be configured in BE. If the user configures
2223  * more, place the card in vlan promiscuous mode.
2224  */
2225 static int
2226 oce_vid_config(POCE_SOFTC sc)
2227 {
2228 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2229 	uint16_t ntags = 0, i;
2230 	int status = 0;
2231 
2232 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2233 			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
2234 		for (i = 0; i < MAX_VLANS; i++) {
2235 			if (sc->vlan_tag[i]) {
2236 				vtags[ntags].vtag = i;
2237 				ntags++;
2238 			}
2239 		}
2240 		if (ntags)
2241 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2242 						vtags, ntags, 1, 0);
2243 	} else
2244 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2245 					 	NULL, 0, 1, 1);
2246 	return status;
2247 }
2248 
2249 
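/* If the ifnet's MAC address has changed, program it into the adapter. */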
2250 static void
2251 oce_mac_addr_set(POCE_SOFTC sc)
2252 {
2253 	uint32_t old_pmac_id = sc->pmac_id;
2254 	int status = 0;
2255 
	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2258 			 sc->macaddr.size_of_struct);
2259 	if (!status)
2260 		return;
2261 
2262 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
2263 					sc->if_id, &sc->pmac_id);
2264 	if (!status) {
2265 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2266 		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
2267 				 sc->macaddr.size_of_struct);
2268 	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
}
2273 
2274 
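/*
 * Pass a user-supplied mailbox command through to the firmware: validate
 * the request, copy it into DMA-able memory, execute it and copy the
 * response back out to user space.
 */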
2275 static int
2276 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
2277 {
2278 	POCE_SOFTC sc = ifp->if_softc;
2279 	struct ifreq *ifr = (struct ifreq *)data;
2280 	int rc = ENXIO;
2281 	char cookie[32] = {0};
2282 	void *priv_data = ifr_data_get_ptr(ifr);
2283 	void *ioctl_ptr;
2284 	uint32_t req_size;
2285 	struct mbx_hdr req;
2286 	OCE_DMA_MEM dma_mem;
2287 	struct mbx_common_get_cntl_attr *fw_cmd;
2288 
2289 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2290 		return EFAULT;
2291 
2292 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2293 		return EINVAL;
2294 
2295 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2296 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2297 		return EFAULT;
2298 
2299 	req_size = le32toh(req.u0.req.request_length);
2300 	if (req_size > 65536)
2301 		return EINVAL;
2302 
2303 	req_size += sizeof(struct mbx_hdr);
2304 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2305 	if (rc)
2306 		return ENOMEM;
2307 
2308 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2309 		rc = EFAULT;
2310 		goto dma_free;
2311 	}
2312 
2313 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2314 	if (rc) {
2315 		rc = EIO;
2316 		goto dma_free;
2317 	}
2318 
2319 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
2320 		rc =  EFAULT;
2321 
	/*
	 * Firmware fills in all the attributes for this ioctl except
	 * the driver version; fill it in here.
	 */
2326 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2327 		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
2328 		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2329 			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
2330 	}
2331 
2332 dma_free:
2333 	oce_dma_free(sc, &dma_mem);
2334 	return rc;
}
2337 
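/*
 * Adaptive interrupt coalescing: once per timer tick, derive a combined
 * RX/TX packets-per-second rate for each EQ and map it to an EQ delay
 * bounded by [min_eqd, max_eqd].
 */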
2338 static void
2339 oce_eqd_set_periodic(POCE_SOFTC sc)
2340 {
2341 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2342 	struct oce_aic_obj *aic;
2343 	struct oce_eq *eqo;
2344 	uint64_t now = 0, delta;
2345 	int eqd, i, num = 0;
2346 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2347 	struct oce_wq *wq;
2348 	struct oce_rq *rq;
2349 
2350 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2351 
2352 	for (i = 0 ; i < sc->neqs; i++) {
2353 		eqo = sc->eq[i];
2354 		aic = &sc->aic_obj[i];
2355 		/* When setting the static eq delay from the user space */
2356 		if (!aic->enable) {
2357 			if (aic->ticks)
2358 				aic->ticks = 0;
2359 			eqd = aic->et_eqd;
2360 			goto modify_eqd;
2361 		}
2362 
2363 		rq = sc->rq[i];
2364 		rxpkts = rq->rx_stats.rx_pkts;
2365 		wq = sc->wq[i];
2366 		tx_reqs = wq->tx_stats.tx_reqs;
2367 		now = ticks;
2368 
		/* Resync and skip on the first pass, on tick or counter
		 * wrap, or when no time has elapsed (delta would be zero). */
		if (!aic->ticks || now <= aic->ticks ||
		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2371 			aic->prev_rxpkts = rxpkts;
2372 			aic->prev_txreqs = tx_reqs;
2373 			aic->ticks = now;
2374 			continue;
2375 		}
2376 
2377 		delta = ticks_to_msecs(now - aic->ticks);
2378 
2379 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2380 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2381 		eqd = (pps / 15000) << 2;
2382 		if (eqd < 8)
2383 			eqd = 0;
2384 
2385 		/* Make sure that the eq delay is in the known range */
2386 		eqd = min(eqd, aic->max_eqd);
2387 		eqd = max(eqd, aic->min_eqd);
2388 
2389 		aic->prev_rxpkts = rxpkts;
2390 		aic->prev_txreqs = tx_reqs;
2391 		aic->ticks = now;
2392 
2393 modify_eqd:
2394 		if (eqd != aic->cur_eqd) {
2395 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2396 			set_eqd[num].eq_id = eqo->eq_id;
2397 			aic->cur_eqd = eqd;
2398 			num++;
2399 		}
2400 	}
2401 
	/* Push the updated delays to firmware, at most 8 EQs per mailbox. */
	for (i = 0; i < num; i += 8) {
		if ((num - i) >= 8)
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
		else
			oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
	}
2409 
2410 }
2411 
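/*
 * Poll the SLIPORT status registers (XE201/Lancer) or the UE status CSRs
 * (other ASICs) for unrecoverable errors and log whatever is found.
 */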
static void
oce_detect_hw_error(POCE_SOFTC sc)
{
2415 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2416 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2417 	uint32_t i;
2418 
2419 	if (sc->hw_error)
2420 		return;
2421 
2422 	if (IS_XE201(sc)) {
2423 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2424 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2425 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2426 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2427 		}
2428 	} else {
2429 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2430 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2431 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2432 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2433 
2434 		ue_low = (ue_low & ~ue_low_mask);
2435 		ue_high = (ue_high & ~ue_high_mask);
2436 	}
2437 
	/*
	 * On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the hardware to stop working completely in case of a real
	 * UE, which is why hw_error is not set on UE detection; it is only
	 * set for SLIPORT errors below.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}
2455 
2456 	if (ue_low) {
2457 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2458 			if (ue_low & 1)
2459 				device_printf(sc->dev, "UE: %s bit set\n",
2460 							ue_status_low_desc[i]);
2461 		}
2462 	}
2463 
2464 	if (ue_high) {
2465 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2466 			if (ue_high & 1)
2467 				device_printf(sc->dev, "UE: %s bit set\n",
2468 							ue_status_hi_desc[i]);
2469 		}
2470 	}
}
2473 
2474 
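/* Once-a-second housekeeping: error detection, statistics refresh, MAC
 * address update, TX watchdog and adaptive EQ delay tuning. */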
2475 static void
2476 oce_local_timer(void *arg)
2477 {
2478 	POCE_SOFTC sc = arg;
2479 	int i = 0;
2480 
2481 	oce_detect_hw_error(sc);
2482 	oce_refresh_nic_stats(sc);
2483 	oce_refresh_queue_stats(sc);
2484 	oce_mac_addr_set(sc);
2485 
	/* TX watchdog */
2487 	for (i = 0; i < sc->nwqs; i++)
2488 		oce_tx_restart(sc, sc->wq[i]);
2489 
2490 	/* calculate and set the eq delay for optimal interrupt rate */
2491 	if (IS_BE(sc) || IS_SH(sc))
2492 		oce_eqd_set_periodic(sc);
2493 
2494 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2495 }
2496 
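/* Drain TX completions, then reclaim anything still left on the rings. */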
2497 static void
2498 oce_tx_compl_clean(POCE_SOFTC sc)
2499 {
2500 	struct oce_wq *wq;
2501 	int i = 0, timeo = 0, num_wqes = 0;
2502 	int pending_txqs = sc->nwqs;
2503 
	/*
	 * Stop polling for completions when the hardware has been silent
	 * for 10ms, a hardware error was seen, or no completions remain
	 * outstanding.
	 */
2507 	do {
2508 		pending_txqs = sc->nwqs;
2509 
2510 		for_all_wq_queues(sc, wq, i) {
2511 			num_wqes = oce_wq_handler(wq);
2512 
2513 			if(num_wqes)
2514 				timeo = 0;
2515 
2516 			if(!wq->ring->num_used)
2517 				pending_txqs--;
2518 		}
2519 
2520 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2521 			break;
2522 
2523 		DELAY(1000);
2524 	} while (TRUE);
2525 
2526 	for_all_wq_queues(sc, wq, i) {
2527 		while(wq->ring->num_used) {
2528 			LOCK(&wq->tx_compl_lock);
2529 			oce_process_tx_completion(wq);
2530 			UNLOCK(&wq->tx_compl_lock);
2531 		}
2532 	}
}
2535 
/* NOTE: This must only be called while holding the DEVICE_LOCK. */
2539 static void
2540 oce_if_deactivate(POCE_SOFTC sc)
2541 {
2542 	int i;
2543 	struct oce_rq *rq;
2544 	struct oce_wq *wq;
2545 	struct oce_eq *eq;
2546 
2547 	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2548 
2549 	oce_tx_compl_clean(sc);
2550 
2551 	/* Stop intrs and finish any bottom halves pending */
2552 	oce_hw_intr_disable(sc);
2553 
	/*
	 * Since taskqueue_drain takes the Giant lock, we should not hold
	 * any other lock.  Drop the device lock and reacquire it after
	 * taskqueue_drain completes.
	 */
2558 	UNLOCK(&sc->dev_lock);
2559 	for (i = 0; i < sc->intr_count; i++) {
2560 		if (sc->intrs[i].tq != NULL) {
2561 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2562 		}
2563 	}
2564 	LOCK(&sc->dev_lock);
2565 
2566 	/* Delete RX queue in card with flush param */
2567 	oce_stop_rx(sc);
2568 
2569 	/* Invalidate any pending cq and eq entries*/
2570 	for_all_evnt_queues(sc, eq, i)
2571 		oce_drain_eq(eq);
2572 	for_all_rq_queues(sc, rq, i)
2573 		oce_drain_rq_cq(rq);
2574 	for_all_wq_queues(sc, wq, i)
2575 		oce_drain_wq_cq(wq);
2576 
	/*
	 * We still need to receive MCC async events, so re-enable
	 * interrupts and arm the first EQ.
	 */
2580 	oce_hw_intr_enable(sc);
2581 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2582 
2583 	DELAY(10);
2584 }
2585 
2586 
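/* Bring the interface back up: restart the RX and TX queues, arm the
 * event queues and re-enable interrupts. */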
2587 static void
2588 oce_if_activate(POCE_SOFTC sc)
2589 {
2590 	struct oce_eq *eq;
2591 	struct oce_rq *rq;
2592 	struct oce_wq *wq;
2593 	int i, rc = 0;
2594 
2595 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2596 
2597 	oce_hw_intr_disable(sc);
2598 
2599 	oce_start_rx(sc);
2600 
2601 	for_all_rq_queues(sc, rq, i) {
2602 		rc = oce_start_rq(rq);
2603 		if (rc)
2604 			device_printf(sc->dev, "Unable to start RX\n");
2605 	}
2606 
2607 	for_all_wq_queues(sc, wq, i) {
2608 		rc = oce_start_wq(wq);
2609 		if (rc)
2610 			device_printf(sc->dev, "Unable to start TX\n");
2611 	}
2612 
	for_all_evnt_queues(sc, eq, i)
2615 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2616 
2617 	oce_hw_intr_enable(sc);
}
2620 
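/* Propagate an async link-state CQE into the ifnet link state. */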
2621 static void
2622 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2623 {
2624 	/* Update Link status */
2625 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2626 	     ASYNC_EVENT_LINK_UP) {
2627 		sc->link_status = ASYNC_EVENT_LINK_UP;
2628 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2629 	} else {
2630 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2631 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2632 	}
2633 }
2634 
2635 
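/* Capture the OS2BMC management filter bits advertised in the async event. */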
static void
oce_async_grp5_osbmc_process(POCE_SOFTC sc,
			     struct oce_async_evt_grp5_os2bmc *evt)
2638 {
2639 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2640 	if (evt->u.s.mgmt_enable)
2641 		sc->flags |= OCE_FLAGS_OS2BMC;
2642 	else
2643 		return;
2644 
2645 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2646 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2647 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2648 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2649 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2650 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2651 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2652 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2653 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2654 }
2655 
2656 
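/* Dispatch GRP5 async events: PVID state changes and OS2BMC updates. */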
static void
oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2658 {
2659 	struct oce_async_event_grp5_pvid_state *gcqe;
2660 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2661 
2662 	switch (cqe->u0.s.async_type) {
2663 	case ASYNC_EVENT_PVID_STATE:
2664 		/* GRP5 PVID */
2665 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2666 		if (gcqe->enabled)
2667 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2668 		else
2669 			sc->pvid = 0;
2670 		break;
2671 	case ASYNC_EVENT_OS2BMC:
2672 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2673 		oce_async_grp5_osbmc_process(sc, bmccqe);
2674 		break;
2675 	default:
2676 		break;
2677 	}
2678 }
2679 
2680 /* Handle the Completion Queue for the Mailbox/Async notifications */
2681 uint16_t
2682 oce_mq_handler(void *arg)
2683 {
2684 	struct oce_mq *mq = (struct oce_mq *)arg;
2685 	POCE_SOFTC sc = mq->parent;
2686 	struct oce_cq *cq = mq->cq;
2687 	int num_cqes = 0, evt_type = 0, optype = 0;
2688 	struct oce_mq_cqe *cqe;
2689 	struct oce_async_cqe_link_state *acqe;
2690 	struct oce_async_event_qnq *dbgcqe;
2691 
	bus_dmamap_sync(cq->ring->dma.tag,
2694 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2695 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2696 
2697 	while (cqe->u0.dw[3]) {
2698 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2699 		if (cqe->u0.s.async_event) {
2700 			evt_type = cqe->u0.s.event_type;
2701 			optype = cqe->u0.s.async_type;
2702 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2703 				/* Link status evt */
2704 				acqe = (struct oce_async_cqe_link_state *)cqe;
2705 				process_link_state(sc, acqe);
2706 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2707 				oce_process_grp5_events(sc, cqe);
2708 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2709 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2710 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2711 				if(dbgcqe->valid)
2712 					sc->qnqid = dbgcqe->vlan_tag;
2713 				sc->qnq_debug_event = TRUE;
2714 			}
2715 		}
2716 		cqe->u0.dw[3] = 0;
2717 		RING_GET(cq->ring, 1);
2718 		bus_dmamap_sync(cq->ring->dma.tag,
2719 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2720 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2721 		num_cqes++;
2722 	}
2723 
2724 	if (num_cqes)
2725 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2726 
2727 	return 0;
2728 }
2729 
2730 
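/* Decide how many RQs/WQs to request, before interrupts are allocated. */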
2731 static void
2732 setup_max_queues_want(POCE_SOFTC sc)
2733 {
	/* Check if it is a FLEX machine.  If so, don't use RSS. */
2735 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2736 	    (sc->function_mode & FNM_UMC_MODE)    ||
2737 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2738 	    (!is_rss_enabled(sc))		  ||
2739 	    IS_BE2(sc)) {
2740 		sc->nrqs = 1;
2741 		sc->nwqs = 1;
2742 	} else {
2743 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2744 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2745 	}
2746 
2747 	if (IS_BE2(sc) && is_rss_enabled(sc))
2748 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2749 }
2750 
2751 
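/* Clamp the queue counts to the interrupt vectors actually allocated. */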
2752 static void
2753 update_queues_got(POCE_SOFTC sc)
2754 {
2755 	if (is_rss_enabled(sc)) {
2756 		sc->nrqs = sc->intr_count + 1;
2757 		sc->nwqs = sc->intr_count;
2758 	} else {
2759 		sc->nrqs = 1;
2760 		sc->nwqs = 1;
2761 	}
2762 
2763 	if (IS_BE2(sc))
2764 		sc->nwqs = 1;
2765 }
2766 
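/*
 * Detect IPv6 packets that carry an extension header; used by the BE3 A1
 * TX stall workaround (see oce_tx_asic_stall_verify()).
 */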
2767 static int
2768 oce_check_ipv6_ext_hdr(struct mbuf *m)
2769 {
2770 	struct ether_header *eh = mtod(m, struct ether_header *);
2771 	caddr_t m_datatemp = m->m_data;
2772 
2773 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2774 		m->m_data += sizeof(struct ether_header);
2775 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2776 
		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
2779 			struct ip6_ext *ip6e = NULL;
2780 			m->m_data += sizeof(struct ip6_hdr);
2781 
			ip6e = mtod(m, struct ip6_ext *);
2783 			if(ip6e->ip6e_len == 0xff) {
2784 				m->m_data = m_datatemp;
2785 				return TRUE;
2786 			}
2787 		}
2788 		m->m_data = m_datatemp;
2789 	}
2790 	return FALSE;
2791 }
2792 
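/* TRUE for BE3 ASICs older than revision 2 (A0/A1 steppings). */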
2793 static int
2794 is_be3_a1(POCE_SOFTC sc)
2795 {
2796 	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2797 		return TRUE;
2798 	}
2799 	return FALSE;
2800 }
2801 
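/*
 * Embed the VLAN (and, under QnQ, the outer) tag into the frame itself.
 * Returns the possibly re-allocated mbuf, or NULL on failure.
 */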
2802 static struct mbuf *
2803 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2804 {
2805 	uint16_t vlan_tag = 0;
2806 
2807 	if(!M_WRITABLE(m))
2808 		return NULL;
2809 
2810 	/* Embed vlan tag in the packet if it is not part of it */
2811 	if(m->m_flags & M_VLANTAG) {
2812 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2813 		m->m_flags &= ~M_VLANTAG;
2814 	}
2815 
2816 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2817 	if(sc->pvid) {
2818 		if(!vlan_tag)
2819 			vlan_tag = sc->pvid;
2820 		if (complete)
2821 			*complete = FALSE;
2822 	}
2823 
	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
		if (m == NULL)
			return NULL;
	}
2827 
2828 	if(sc->qnqid) {
2829 		m = ether_vlanencap(m, sc->qnqid);
2830 
2831 		if (complete)
2832 			*complete = FALSE;
2833 	}
2834 	return m;
2835 }
2836 
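/* TRUE when the BE3 A1 QnQ/UMC IPv6 extension header workaround applies. */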
2837 static int
2838 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2839 {
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
	    oce_check_ipv6_ext_hdr(m)) {
2842 		return TRUE;
2843 	}
2844 	return FALSE;
2845 }
2846 
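/* Discover queue and RSS resources, from firmware where supported. */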
2847 static void
2848 oce_get_config(POCE_SOFTC sc)
2849 {
2850 	int rc = 0;
2851 	uint32_t max_rss = 0;
2852 
2853 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2854 		max_rss = OCE_LEGACY_MODE_RSS;
2855 	else
2856 		max_rss = OCE_MAX_RSS;
2857 
2858 	if (!IS_BE(sc)) {
2859 		rc = oce_get_profile_config(sc, max_rss);
2860 		if (rc) {
2861 			sc->nwqs = OCE_MAX_WQ;
2862 			sc->nrssqs = max_rss;
2863 			sc->nrqs = sc->nrssqs + 1;
2864 		}
	} else { /* For BE3 don't rely on FW for determining the resources */
2867 		sc->nrssqs = max_rss;
2868 		sc->nrqs = sc->nrssqs + 1;
2869 		sc->nwqs = OCE_MAX_WQ;
2870 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2871 	}
2872 }
2873 
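/* Callback for the RoCE driver to unhook itself from this driver. */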
2874 static void
2875 oce_rdma_close(void)
2876 {
	oce_rdma_if = NULL;
2880 }
2881 
2882 static void
2883 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2884 {
	memcpy(macaddr, sc->macaddr.mac_addr, 6);
2886 }
2887 
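/*
 * Registration entry point for the RoCE driver: record its callback
 * table and announce every attached oce device to it.
 */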
2888 int
2889 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
{
	POCE_SOFTC sc;
	struct oce_dev_info di;
	int i;

	if ((rdma_info == NULL) || (rdma_if == NULL))
		return -EINVAL;

	if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
	    (rdma_if->size != OCE_RDMA_IF_SIZE))
		return -ENXIO;

	rdma_info->close = oce_rdma_close;
	rdma_info->mbox_post = oce_mbox_post;
	rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
	rdma_info->get_mac_addr = oce_get_mac_addr;

	oce_rdma_if = rdma_if;

	/* Nothing to announce to; don't walk the softc list. */
	if (rdma_if->announce == NULL)
		return 0;

	for (sc = softc_head; sc != NULL; sc = sc->next) {
		memset(&di, 0, sizeof(di));
		di.dev = sc->dev;
		di.softc = sc;
		di.ifp = sc->ifp;
		di.db_bhandle = sc->db_bhandle;
		di.db_btag = sc->db_btag;
		di.db_page_size = 4096;
		if (sc->flags & OCE_FLAGS_USING_MSIX)
			di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
		else if (sc->flags & OCE_FLAGS_USING_MSI)
			di.intr_mode = OCE_INTERRUPT_MODE_MSI;
		else
			di.intr_mode = OCE_INTERRUPT_MODE_INTX;
		di.dev_family = OCE_GEN2_FAMILY; /* XXX: must detect Skyhawk */
		if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
			di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
			di.msix.start_vector = sc->intr_count;
			for (i = 0; i < di.msix.num_vectors; i++)
				di.msix.vector_list[i] = sc->intrs[i].vector;
		}
		memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
		di.vendor_id = pci_get_vendor(sc->dev);
		di.dev_id = pci_get_device(sc->dev);

		if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED)
			di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;

		rdma_if->announce(&di);
	}

	return 0;
}
2952 
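/* Read the optional kenv tunables: oce_enable_hwlro and oce_rq_buf_size. */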
static void
oce_read_env_variables(POCE_SOFTC sc)
{
	char *value = NULL;
	int rc = 0;

	/* Read whether the user wants hardware or software LRO. */
	/* Note: the tunable lookup below is disabled, so value stays NULL
	 * and hardware LRO defaults to off. */
	//value = getenv("oce_enable_hwlro");
	if (value && IS_SH(sc)) {
		sc->enable_hwlro = strtol(value, NULL, 10);
		if (sc->enable_hwlro) {
			rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
			if (rc) {
				device_printf(sc->dev, "no hardware lro support\n");
				device_printf(sc->dev, "software lro enabled\n");
				sc->enable_hwlro = 0;
			} else {
				device_printf(sc->dev, "hardware lro enabled\n");
				oce_max_rsp_handled = 32;
			}
		} else {
			device_printf(sc->dev, "software lro enabled\n");
		}
	} else {
		sc->enable_hwlro = 0;
	}

	/* Read the receive mbuf size; only a few fixed sizes are accepted. */
	//value = getenv("oce_rq_buf_size");
	if (value && IS_SH(sc)) {
		oce_rq_buf_size = strtol(value, NULL, 10);
		switch (oce_rq_buf_size) {
		case 2048:
		case 4096:
		case 9216:
		case 16384:
			break;
		default:
			device_printf(sc->dev,
			    "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");
			oce_rq_buf_size = 2048;
		}
	}
}
2999