xref: /freebsd/sys/dev/oce/oce_if.c (revision 0fc7bdc978366abb4351b0b76b50a5848cc5d982)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (C) 2013 Emulex
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  *    this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the Emulex Corporation nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Contact Information:
34  * freebsd-drivers@emulex.com
35  *
36  * Emulex
37  * 3333 Susan Street
38  * Costa Mesa, CA 92626
39  */
40 
41 
42 #include "opt_inet6.h"
43 #include "opt_inet.h"
44 
45 #include "oce_if.h"
46 #include "oce_user.h"
47 
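/* True when the stack has requested TSO for this mbuf (m needs a pkthdr). */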
48 #define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)
49 
50 /* UE Status Low CSR */
51 static char *ue_status_low_desc[] = {
52         "CEV",
53         "CTX",
54         "DBUF",
55         "ERX",
56         "Host",
57         "MPU",
58         "NDMA",
59         "PTC ",
60         "RDMA ",
61         "RXF ",
62         "RXIPS ",
63         "RXULP0 ",
64         "RXULP1 ",
65         "RXULP2 ",
66         "TIM ",
67         "TPOST ",
68         "TPRE ",
69         "TXIPS ",
70         "TXULP0 ",
71         "TXULP1 ",
72         "UC ",
73         "WDMA ",
74         "TXULP2 ",
75         "HOST1 ",
76         "P0_OB_LINK ",
77         "P1_OB_LINK ",
78         "HOST_GPIO ",
79         "MBOX ",
80         "AXGMAC0",
81         "AXGMAC1",
82         "JTAG",
83         "MPU_INTPEND"
84 };
85 
86 /* UE Status High CSR */
87 static char *ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121 
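/*
 * Normalized RX completion info, filled from either the legacy NIC RX CQE
 * or the HW LRO CQE variants so that the mbuf-chaining code can be shared.
 */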
122 struct oce_common_cqe_info{
123         uint8_t vtp:1;
124         uint8_t l4_cksum_pass:1;
125         uint8_t ip_cksum_pass:1;
126         uint8_t ipv6_frame:1;
127         uint8_t qnq:1;
128         uint8_t rsvd:3;
129         uint8_t num_frags;
130         uint16_t pkt_size;
131         uint16_t vtag;
132 };
133 
134 /* Driver entry point prototypes */
135 static int  oce_probe(device_t dev);
136 static int  oce_attach(device_t dev);
137 static int  oce_detach(device_t dev);
138 static int  oce_shutdown(device_t dev);
139 static int  oce_ioctl(if_t ifp, u_long command, caddr_t data);
140 static void oce_init(void *xsc);
141 static int  oce_multiq_start(if_t ifp, struct mbuf *m);
142 static void oce_multiq_flush(if_t ifp);
143 
144 /* Driver interrupt routine prototypes */
145 static void oce_intr(void *arg, int pending);
146 static int  oce_setup_intr(POCE_SOFTC sc);
147 static int  oce_fast_isr(void *arg);
148 static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
149 			  void (*isr) (void *arg, int pending));
150 
151 /* Media callback prototypes */
152 static void oce_media_status(if_t ifp, struct ifmediareq *req);
153 static int  oce_media_change(if_t ifp);
154 
155 /* Transmit routine prototypes */
156 static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
157 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
158 static void oce_process_tx_completion(struct oce_wq *wq);
159 static int  oce_multiq_transmit(if_t ifp, struct mbuf *m,
160 				 struct oce_wq *wq);
161 
162 /* Receive routine prototypes */
163 static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
164 static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
166 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
167 static uint16_t oce_rq_handler_lro(void *arg);
168 static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
171 
172 /* Helper function prototypes in this file */
173 static void oce_attach_ifp(POCE_SOFTC sc);
174 static void oce_add_vlan(void *arg, if_t ifp, uint16_t vtag);
175 static void oce_del_vlan(void *arg, if_t ifp, uint16_t vtag);
176 static int  oce_vid_config(POCE_SOFTC sc);
177 static void oce_mac_addr_set(POCE_SOFTC sc);
178 static int  oce_handle_passthrough(if_t ifp, caddr_t data);
179 static void oce_local_timer(void *arg);
180 static void oce_if_deactivate(POCE_SOFTC sc);
181 static void oce_if_activate(POCE_SOFTC sc);
182 static void setup_max_queues_want(POCE_SOFTC sc);
183 static void update_queues_got(POCE_SOFTC sc);
184 static void process_link_state(POCE_SOFTC sc,
185 		 struct oce_async_cqe_link_state *acqe);
186 static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
187 static void oce_get_config(POCE_SOFTC sc);
188 static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
189 static void oce_read_env_variables(POCE_SOFTC sc);
190 
191 /* IP specific */
192 #if defined(INET6) || defined(INET)
193 static int  oce_init_lro(POCE_SOFTC sc);
194 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
195 #endif
196 
197 static device_method_t oce_dispatch[] = {
198 	DEVMETHOD(device_probe, oce_probe),
199 	DEVMETHOD(device_attach, oce_attach),
200 	DEVMETHOD(device_detach, oce_detach),
201 	DEVMETHOD(device_shutdown, oce_shutdown),
202 
203 	DEVMETHOD_END
204 };
205 
206 static driver_t oce_driver = {
207 	"oce",
208 	oce_dispatch,
209 	sizeof(OCE_SOFTC)
210 };
211 
212 /* global vars */
213 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
214 
215 /* Module capabilities and parameters */
216 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
217 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
218 uint32_t oce_rq_buf_size = 2048;
219 
220 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
221 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
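/*
 * Both tunables are read from the kernel environment at module load time
 * and can therefore be set in loader.conf.  A sketch (the values shown are
 * illustrative, not recommendations):
 *
 *   hw.oce.max_rsp_handled="64"
 *   hw.oce.enable_rss="1"
 */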
222 
223 /* Supported devices table */
224 static uint32_t supportedDevices[] =  {
225 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
226 	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
227 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
228 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
229 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
230 	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
231 };
232 
233 DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
234 MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
235     nitems(supportedDevices));
236 MODULE_DEPEND(oce, pci, 1, 1, 1);
237 MODULE_DEPEND(oce, ether, 1, 1, 1);
238 MODULE_VERSION(oce, 1);
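/*
 * The driver attaches to the PCI bus and depends on the ether layer; per
 * oce(4) it is normally loaded as the if_oce module, e.g. by setting
 * if_oce_load="YES" in loader.conf.
 */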
239 
240 POCE_SOFTC softc_head = NULL;
241 POCE_SOFTC softc_tail = NULL;
242 
243 struct oce_rdma_if *oce_rdma_if = NULL;
244 
245 /*****************************************************************************
246  *			Driver entry points                                  *
247  *****************************************************************************/
248 
249 static int
250 oce_probe(device_t dev)
251 {
252 	uint16_t vendor = 0;
253 	uint16_t device = 0;
254 	int i = 0;
255 	POCE_SOFTC sc;
256 
257 	sc = device_get_softc(dev);
258 	bzero(sc, sizeof(OCE_SOFTC));
259 	sc->dev = dev;
260 
261 	vendor = pci_get_vendor(dev);
262 	device = pci_get_device(dev);
263 
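	/* Each supportedDevices entry packs the vendor ID in the upper 16
	 * bits and the device ID in the lower 16 bits; match on both. */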
264 	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
265 		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
266 			if (device == (supportedDevices[i] & 0xffff)) {
267 				device_set_descf(dev,
268 				    "%s:%s", "Emulex CNA NIC function",
269 				    component_revision);
270 
271 				switch (device) {
272 				case PCI_PRODUCT_BE2:
273 					sc->flags |= OCE_FLAGS_BE2;
274 					break;
275 				case PCI_PRODUCT_BE3:
276 					sc->flags |= OCE_FLAGS_BE3;
277 					break;
278 				case PCI_PRODUCT_XE201:
279 				case PCI_PRODUCT_XE201_VF:
280 					sc->flags |= OCE_FLAGS_XE201;
281 					break;
282 				case PCI_PRODUCT_SH:
283 					sc->flags |= OCE_FLAGS_SH;
284 					break;
285 				default:
286 					return ENXIO;
287 				}
288 				return BUS_PROBE_DEFAULT;
289 			}
290 		}
291 	}
292 
293 	return ENXIO;
294 }
295 
296 static int
297 oce_attach(device_t dev)
298 {
299 	POCE_SOFTC sc;
300 	int rc = 0;
301 
302 	sc = device_get_softc(dev);
303 
304 	rc = oce_hw_pci_alloc(sc);
305 	if (rc)
306 		return rc;
307 
308 	sc->tx_ring_size = OCE_TX_RING_SIZE;
309 	sc->rx_ring_size = OCE_RX_RING_SIZE;
310 	/* receive fragment size should be a multiple of 2K */
311 	sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
312 	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
313 	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;
314 
315 	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
316 	LOCK_CREATE(&sc->dev_lock,  "Device_lock");
317 
318 	/* initialise the hardware */
319 	rc = oce_hw_init(sc);
320 	if (rc)
321 		goto pci_res_free;
322 
323 	oce_read_env_variables(sc);
324 
325 	oce_get_config(sc);
326 
327 	setup_max_queues_want(sc);
328 
329 	rc = oce_setup_intr(sc);
330 	if (rc)
331 		goto mbox_free;
332 
333 	rc = oce_queue_init_all(sc);
334 	if (rc)
335 		goto intr_free;
336 
337 	oce_attach_ifp(sc);
338 
339 #if defined(INET6) || defined(INET)
340 	rc = oce_init_lro(sc);
341 	if (rc)
342 		goto ifp_free;
343 #endif
344 
345 	rc = oce_hw_start(sc);
346 	if (rc)
347 		goto lro_free;
348 
349 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
350 				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
351 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
352 				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
353 
354 	rc = oce_stats_init(sc);
355 	if (rc)
356 		goto vlan_free;
357 
358 	oce_add_sysctls(sc);
359 
360 	callout_init(&sc->timer, CALLOUT_MPSAFE);
361 	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
362 	if (rc)
363 		goto stats_free;
364 
365 	sc->next = NULL;
366 	if (softc_tail != NULL) {
367 	  softc_tail->next = sc;
368 	} else {
369 	  softc_head = sc;
370 	}
371 	softc_tail = sc;
372 
373 	gone_in_dev(dev, 15, "relatively uncommon 10GbE NIC");
374 
375 	return 0;
376 
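/*
 * Error unwinding: each label below releases what was set up before the
 * corresponding failure point, in reverse order of acquisition.
 */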
377 stats_free:
378 	callout_drain(&sc->timer);
379 	oce_stats_free(sc);
380 vlan_free:
381 	if (sc->vlan_attach)
382 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
383 	if (sc->vlan_detach)
384 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
385 	oce_hw_intr_disable(sc);
386 lro_free:
387 #if defined(INET6) || defined(INET)
388 	oce_free_lro(sc);
389 ifp_free:
390 #endif
391 	ether_ifdetach(sc->ifp);
392 	if_free(sc->ifp);
393 	oce_queue_release_all(sc);
394 intr_free:
395 	oce_intr_free(sc);
396 mbox_free:
397 	oce_dma_free(sc, &sc->bsmbx);
398 pci_res_free:
399 	oce_hw_pci_free(sc);
400 	LOCK_DESTROY(&sc->dev_lock);
401 	LOCK_DESTROY(&sc->bmbx_lock);
402 	return rc;
403 
404 }
405 
406 static int
407 oce_detach(device_t dev)
408 {
409 	POCE_SOFTC sc = device_get_softc(dev);
410 	POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
411 
412         poce_sc_tmp = softc_head;
413         ppoce_sc_tmp1 = &softc_head;
414         while (poce_sc_tmp != NULL) {
415           if (poce_sc_tmp == sc) {
416             *ppoce_sc_tmp1 = sc->next;
417             if (sc->next == NULL) {
418               softc_tail = poce_sc_tmp2;
419             }
420             break;
421           }
422           poce_sc_tmp2 = poce_sc_tmp;
423           ppoce_sc_tmp1 = &poce_sc_tmp->next;
424           poce_sc_tmp = poce_sc_tmp->next;
425         }
426 
427 	LOCK(&sc->dev_lock);
428 	oce_if_deactivate(sc);
429 	UNLOCK(&sc->dev_lock);
430 
431 	callout_drain(&sc->timer);
432 
433 	if (sc->vlan_attach != NULL)
434 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
435 	if (sc->vlan_detach != NULL)
436 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
437 
438 	ether_ifdetach(sc->ifp);
439 
440 	if_free(sc->ifp);
441 
442 	oce_hw_shutdown(sc);
443 
444 	bus_generic_detach(dev);
445 
446 	return 0;
447 }
448 
449 static int
450 oce_shutdown(device_t dev)
451 {
452 	int rc;
453 
454 	rc = oce_detach(dev);
455 
456 	return rc;
457 }
458 
459 static int
460 oce_ioctl(if_t ifp, u_long command, caddr_t data)
461 {
462 	struct ifreq *ifr = (struct ifreq *)data;
463 	POCE_SOFTC sc = if_getsoftc(ifp);
464 	struct ifi2creq i2c;
465 	uint8_t	offset = 0;
466 	int rc = 0;
467 	uint32_t u;
468 
469 	switch (command) {
470 	case SIOCGIFMEDIA:
471 		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
472 		break;
473 
474 	case SIOCSIFMTU:
475 		if (ifr->ifr_mtu > OCE_MAX_MTU)
476 			rc = EINVAL;
477 		else
478 			if_setmtu(ifp, ifr->ifr_mtu);
479 		break;
480 
481 	case SIOCSIFFLAGS:
482 		if (if_getflags(ifp) & IFF_UP) {
483 			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
484 				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
485 				oce_init(sc);
486 			}
487 			device_printf(sc->dev, "Interface Up\n");
488 		} else {
489 			LOCK(&sc->dev_lock);
490 
491 			if_setdrvflagbits(sc->ifp, 0,
492 			    IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
493 			oce_if_deactivate(sc);
494 
495 			UNLOCK(&sc->dev_lock);
496 
497 			device_printf(sc->dev, "Interface Down\n");
498 		}
499 
500 		if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
501 			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
502 				sc->promisc = TRUE;
503 		} else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
504 			if (!oce_rxf_set_promiscuous(sc, 0))
505 				sc->promisc = FALSE;
506 		}
507 
508 		break;
509 
510 	case SIOCADDMULTI:
511 	case SIOCDELMULTI:
512 		rc = oce_hw_update_multicast(sc);
513 		if (rc)
514 			device_printf(sc->dev,
515 				"Update multicast address failed\n");
516 		break;
517 
518 	case SIOCSIFCAP:
519 		u = ifr->ifr_reqcap ^ if_getcapenable(ifp);
520 
521 		if (u & IFCAP_TXCSUM) {
522 			if_togglecapenable(ifp, IFCAP_TXCSUM);
523 			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
524 
525 			if (IFCAP_TSO & if_getcapenable(ifp) &&
526 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
527 				u &= ~IFCAP_TSO;
528 				if_setcapenablebit(ifp, 0, IFCAP_TSO);
529 				if_sethwassistbits(ifp, 0, CSUM_TSO);
530 				if_printf(ifp,
531 					 "TSO disabled due to -txcsum.\n");
532 			}
533 		}
534 
535 		if (u & IFCAP_RXCSUM)
536 			if_togglecapenable(ifp, IFCAP_RXCSUM);
537 
538 		if (u & IFCAP_TSO4) {
539 			if_togglecapenable(ifp, IFCAP_TSO4);
540 
541 			if (IFCAP_TSO & if_getcapenable(ifp)) {
542 				if (IFCAP_TXCSUM & if_getcapenable(ifp))
543 					if_sethwassistbits(ifp, CSUM_TSO, 0);
544 				else {
545 					if_setcapenablebit(ifp, 0, IFCAP_TSO);
546 					if_sethwassistbits(ifp, 0, CSUM_TSO);
547 					if_printf(ifp,
548 					    "Enable txcsum first.\n");
549 					rc = EAGAIN;
550 				}
551 			} else
552 				if_sethwassistbits(ifp, 0, CSUM_TSO);
553 		}
554 
555 		if (u & IFCAP_VLAN_HWTAGGING)
556 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
557 
558 		if (u & IFCAP_VLAN_HWFILTER) {
559 			if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
560 			oce_vid_config(sc);
561 		}
562 #if defined(INET6) || defined(INET)
563 		if (u & IFCAP_LRO) {
564 			if_togglecapenable(ifp, IFCAP_LRO);
565 			if(sc->enable_hwlro) {
566 				if(if_getcapenable(ifp) & IFCAP_LRO) {
567 					rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
568 				}else {
569 					rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
570 				}
571 			}
572 		}
573 #endif
574 
575 		break;
576 
577 	case SIOCGI2C:
578 		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
579 		if (rc)
580 			break;
581 
582 		if (i2c.dev_addr == PAGE_NUM_A0) {
583 			offset = i2c.offset;
584 		} else if (i2c.dev_addr == PAGE_NUM_A2) {
585 			offset = TRANSCEIVER_A0_SIZE + i2c.offset;
586 		} else {
587 			rc = EINVAL;
588 			break;
589 		}
590 
591 		if (i2c.len > sizeof(i2c.data) ||
592 		    i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
593 			rc = EINVAL;
594 			break;
595 		}
596 
597 		rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
598 		if (rc) {
599 			rc = -rc;
600 			break;
601 		}
602 
603 		memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);
604 
605 		rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
606 		break;
607 
608 	case SIOCGPRIVATE_0:
609 		rc = priv_check(curthread, PRIV_DRIVER);
610 		if (rc != 0)
611 			break;
612 		rc = oce_handle_passthrough(ifp, data);
613 		break;
614 	default:
615 		rc = ether_ioctl(ifp, command, data);
616 		break;
617 	}
618 
619 	return rc;
620 }
621 
622 static void
623 oce_init(void *arg)
624 {
625 	POCE_SOFTC sc = arg;
626 
627 	LOCK(&sc->dev_lock);
628 
629 	if (if_getflags(sc->ifp) & IFF_UP) {
630 		oce_if_deactivate(sc);
631 		oce_if_activate(sc);
632 	}
633 
634 	UNLOCK(&sc->dev_lock);
635 
636 }
637 
638 static int
639 oce_multiq_start(if_t ifp, struct mbuf *m)
640 {
641 	POCE_SOFTC sc = if_getsoftc(ifp);
642 	struct oce_wq *wq = NULL;
643 	int queue_index = 0;
644 	int status = 0;
645 
646 	if (!sc->link_status)
647 		return ENXIO;
648 
649 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
650 		queue_index = m->m_pkthdr.flowid % sc->nwqs;
651 
652 	wq = sc->wq[queue_index];
653 
654 	LOCK(&wq->tx_lock);
655 	status = oce_multiq_transmit(ifp, m, wq);
656 	UNLOCK(&wq->tx_lock);
657 
658 	return status;
659 
660 }
661 
662 static void
663 oce_multiq_flush(if_t ifp)
664 {
665 	POCE_SOFTC sc = if_getsoftc(ifp);
666 	struct mbuf     *m;
667 	int i = 0;
668 
669 	for (i = 0; i < sc->nwqs; i++) {
670 		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
671 			m_freem(m);
672 	}
673 	if_qflush(ifp);
674 }
675 
676 /*****************************************************************************
677  *                   Driver interrupt routines                               *
678  *****************************************************************************/
679 
680 static void
681 oce_intr(void *arg, int pending)
682 {
683 
684 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
685 	POCE_SOFTC sc = ii->sc;
686 	struct oce_eq *eq = ii->eq;
687 	struct oce_eqe *eqe;
688 	struct oce_cq *cq = NULL;
689 	int i, num_eqes = 0;
690 
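	/* Drain every posted event queue entry, clearing each one so the
	 * hardware can reuse the slot; num_eqes counts what we consumed. */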
691 	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
692 				 BUS_DMASYNC_POSTWRITE);
693 	do {
694 		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
695 		if (eqe->evnt == 0)
696 			break;
697 		eqe->evnt = 0;
698 		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
699 					BUS_DMASYNC_POSTWRITE);
700 		RING_GET(eq->ring, 1);
701 		num_eqes++;
702 
703 	} while (TRUE);
704 
705 	if (!num_eqes)
706 		goto eq_arm; /* Spurious */
707 
708 	/* Clear EQ entries, but don't arm */
709 	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
710 
711 	/* Process TX, RX and MCC completions, but don't arm the CQs */
712 	for (i = 0; i < eq->cq_valid; i++) {
713 		cq = eq->cq[i];
714 		(*cq->cq_handler)(cq->cb_arg);
715 	}
716 
717 	/* Arm all cqs connected to this EQ */
718 	for (i = 0; i < eq->cq_valid; i++) {
719 		cq = eq->cq[i];
720 		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
721 	}
722 
723 eq_arm:
724 	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
725 
726 	return;
727 }
728 
729 static int
730 oce_setup_intr(POCE_SOFTC sc)
731 {
732 	int rc = 0, use_intx = 0;
733 	int vector = 0, req_vectors = 0;
734 	int tot_req_vectors, tot_vectors;
735 
736 	if (is_rss_enabled(sc))
737 		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
738 	else
739 		req_vectors = 1;
740 
741 	tot_req_vectors = req_vectors;
742 	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
743 	  if (req_vectors > 1) {
744 	    tot_req_vectors += OCE_RDMA_VECTORS;
745 	    sc->roce_intr_count = OCE_RDMA_VECTORS;
746 	  }
747 	}
748 
749         if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
750 		sc->intr_count = req_vectors;
751                 tot_vectors = tot_req_vectors;
752 		rc = pci_alloc_msix(sc->dev, &tot_vectors);
753 		if (rc != 0) {
754 			use_intx = 1;
755 			pci_release_msi(sc->dev);
756 		} else {
757 		  if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
758 		    if (tot_vectors < tot_req_vectors) {
759 		      if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
760 			sc->roce_intr_count = (tot_vectors / 2);
761 		      }
762 		      sc->intr_count = tot_vectors - sc->roce_intr_count;
763 		    }
764 		  } else {
765 		    sc->intr_count = tot_vectors;
766 		  }
767     		  sc->flags |= OCE_FLAGS_USING_MSIX;
768 		}
769 	} else
770 		use_intx = 1;
771 
772 	if (use_intx)
773 		sc->intr_count = 1;
774 
775 	/* Scale number of queues based on intr we got */
776 	update_queues_got(sc);
777 
778 	if (use_intx) {
779 		device_printf(sc->dev, "Using legacy interrupt\n");
780 		rc = oce_alloc_intr(sc, vector, oce_intr);
781 		if (rc)
782 			goto error;
783 	} else {
784 		for (; vector < sc->intr_count; vector++) {
785 			rc = oce_alloc_intr(sc, vector, oce_intr);
786 			if (rc)
787 				goto error;
788 		}
789 	}
790 
791 	return 0;
792 error:
793 	oce_intr_free(sc);
794 	return rc;
795 }
796 
797 static int
798 oce_fast_isr(void *arg)
799 {
800 	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
801 	POCE_SOFTC sc = ii->sc;
802 
803 	if (ii->eq == NULL)
804 		return FILTER_STRAY;
805 
806 	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
807 
808 	taskqueue_enqueue(ii->tq, &ii->task);
809 
810  	ii->eq->intr++;
811 
812 	return FILTER_HANDLED;
813 }
814 
815 static int
816 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
817 {
818 	POCE_INTR_INFO ii;
819 	int rc = 0, rr;
820 
821 	if (vector >= OCE_MAX_EQ)
822 		return (EINVAL);
823 
824 	ii = &sc->intrs[vector];
825 
826 	/* Set the resource id for the interrupt.
827 	 * MSIx is vector + 1 for the resource id,
828 	 * INTx is 0 for the resource id.
829 	 */
830 	if (sc->flags & OCE_FLAGS_USING_MSIX)
831 		rr = vector + 1;
832 	else
833 		rr = 0;
834 	ii->intr_res = bus_alloc_resource_any(sc->dev,
835 					      SYS_RES_IRQ,
836 					      &rr, RF_ACTIVE|RF_SHAREABLE);
837 	ii->irq_rr = rr;
838 	if (ii->intr_res == NULL) {
839 		device_printf(sc->dev,
840 			  "Could not allocate interrupt\n");
841 		rc = ENXIO;
842 		return rc;
843 	}
844 
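	/* Two-stage interrupt handling: oce_fast_isr() runs in interrupt
	 * context, clears the interrupt without re-arming the EQ and
	 * enqueues this task, which does the completion processing. */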
845 	TASK_INIT(&ii->task, 0, isr, ii);
846 	ii->vector = vector;
847 	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
848 	ii->tq = taskqueue_create_fast(ii->task_name,
849 			M_NOWAIT,
850 			taskqueue_thread_enqueue,
851 			&ii->tq);
852 	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
853 			device_get_nameunit(sc->dev));
854 
855 	ii->sc = sc;
856 	rc = bus_setup_intr(sc->dev,
857 			ii->intr_res,
858 			INTR_TYPE_NET,
859 			oce_fast_isr, NULL, ii, &ii->tag);
860 	return rc;
861 
862 }
863 
864 void
865 oce_intr_free(POCE_SOFTC sc)
866 {
867 	int i = 0;
868 
869 	for (i = 0; i < sc->intr_count; i++) {
870 
871 		if (sc->intrs[i].tag != NULL)
872 			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
873 						sc->intrs[i].tag);
874 		if (sc->intrs[i].tq != NULL)
875 			taskqueue_free(sc->intrs[i].tq);
876 
877 		if (sc->intrs[i].intr_res != NULL)
878 			bus_release_resource(sc->dev, SYS_RES_IRQ,
879 						sc->intrs[i].irq_rr,
880 						sc->intrs[i].intr_res);
881 		sc->intrs[i].tag = NULL;
882 		sc->intrs[i].intr_res = NULL;
883 	}
884 
885 	if (sc->flags & OCE_FLAGS_USING_MSIX)
886 		pci_release_msi(sc->dev);
887 
888 }
889 
890 /******************************************************************************
891 *			  Media callbacks				      *
892 ******************************************************************************/
893 
894 static void
895 oce_media_status(if_t ifp, struct ifmediareq *req)
896 {
897 	POCE_SOFTC sc = (POCE_SOFTC) if_getsoftc(ifp);
898 
899 	req->ifm_status = IFM_AVALID;
900 	req->ifm_active = IFM_ETHER;
901 
902 	if (sc->link_status == 1)
903 		req->ifm_status |= IFM_ACTIVE;
904 	else
905 		return;
906 
907 	switch (sc->link_speed) {
908 	case 1: /* 10 Mbps */
909 		req->ifm_active |= IFM_10_T | IFM_FDX;
910 		sc->speed = 10;
911 		break;
912 	case 2: /* 100 Mbps */
913 		req->ifm_active |= IFM_100_TX | IFM_FDX;
914 		sc->speed = 100;
915 		break;
916 	case 3: /* 1 Gbps */
917 		req->ifm_active |= IFM_1000_T | IFM_FDX;
918 		sc->speed = 1000;
919 		break;
920 	case 4: /* 10 Gbps */
921 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
922 		sc->speed = 10000;
923 		break;
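	/* 20G and 25G links are reported with the closest available media
	 * subtype (IFM_10G_SR); sc->speed still records the true speed. */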
924 	case 5: /* 20 Gbps */
925 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
926 		sc->speed = 20000;
927 		break;
928 	case 6: /* 25 Gbps */
929 		req->ifm_active |= IFM_10G_SR | IFM_FDX;
930 		sc->speed = 25000;
931 		break;
932 	case 7: /* 40 Gbps */
933 		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
934 		sc->speed = 40000;
935 		break;
936 	default:
937 		sc->speed = 0;
938 		break;
939 	}
940 
941 	return;
942 }
943 
944 int
945 oce_media_change(if_t ifp)
946 {
947 	return 0;
948 }
949 
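/*
 * OS2BMC: decide whether a copy of this outgoing packet must also be sent
 * to the on-board management controller, based on the filters advertised
 * by firmware (ARP, DHCP, NetBIOS, ICMPv6 RA/NA, ...).  If so, *m_new is
 * set to a duplicate of the mbuf with the VLAN tag inserted.
 */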
950 static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
951 				struct mbuf *m, boolean_t *os2bmc,
952 				struct mbuf **m_new)
953 {
954 	struct ether_header *eh = NULL;
955 
956 	eh = mtod(m, struct ether_header *);
957 
958 	if (!is_os2bmc_enabled(sc) || *os2bmc) {
959 		*os2bmc = FALSE;
960 		goto done;
961 	}
962 	if (!ETHER_IS_MULTICAST(eh->ether_dhost))
963 		goto done;
964 
965 	if (is_mc_allowed_on_bmc(sc, eh) ||
966 	    is_bc_allowed_on_bmc(sc, eh) ||
967 	    is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
968 		*os2bmc = TRUE;
969 		goto done;
970 	}
971 
972 	if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
973 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
974 		uint8_t nexthdr = ip6->ip6_nxt;
975 		if (nexthdr == IPPROTO_ICMPV6) {
976 			struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
977 			switch (icmp6->icmp6_type) {
978 			case ND_ROUTER_ADVERT:
979 				*os2bmc = is_ipv6_ra_filt_enabled(sc);
980 				goto done;
981 			case ND_NEIGHBOR_ADVERT:
982 				*os2bmc = is_ipv6_na_filt_enabled(sc);
983 				goto done;
984 			default:
985 				break;
986 			}
987 		}
988 	}
989 
990 	if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
991 		struct ip *ip = mtod(m, struct ip *);
992 		int iphlen = ip->ip_hl << 2;
993 		struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
994 		switch (uh->uh_dport) {
995 		case DHCP_CLIENT_PORT:
996 			*os2bmc = is_dhcp_client_filt_enabled(sc);
997 			goto done;
998 		case DHCP_SERVER_PORT:
999 			*os2bmc = is_dhcp_srvr_filt_enabled(sc);
1000 			goto done;
1001 		case NET_BIOS_PORT1:
1002 		case NET_BIOS_PORT2:
1003 			*os2bmc = is_nbios_filt_enabled(sc);
1004 			goto done;
1005 		case DHCPV6_RAS_PORT:
1006 			*os2bmc = is_ipv6_ras_filt_enabled(sc);
1007 			goto done;
1008 		default:
1009 			break;
1010 		}
1011 	}
1012 done:
1013 	if (*os2bmc) {
1014 		*m_new = m_dup(m, M_NOWAIT);
1015 		if (!*m_new) {
1016 			*os2bmc = FALSE;
1017 			return;
1018 		}
1019 		*m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
1020 	}
1021 }
1022 
1023 /*****************************************************************************
1024  *			  Transmit routines				     *
1025  *****************************************************************************/
1026 
1027 static int
1028 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
1029 {
1030 	int rc = 0, i, retry_cnt = 0;
1031 	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
1032 	struct mbuf *m, *m_temp, *m_new = NULL;
1033 	struct oce_wq *wq = sc->wq[wq_index];
1034 	struct oce_packet_desc *pd;
1035 	struct oce_nic_hdr_wqe *nichdr;
1036 	struct oce_nic_frag_wqe *nicfrag;
1037 	struct ether_header *eh = NULL;
1038 	int num_wqes;
1039 	uint32_t reg_value;
1040 	boolean_t complete = TRUE;
1041 	boolean_t os2bmc = FALSE;
1042 
1043 	m = *mpp;
1044 	if (!m)
1045 		return EINVAL;
1046 
1047 	if (!(m->m_flags & M_PKTHDR)) {
1048 		rc = ENXIO;
1049 		goto free_ret;
1050 	}
1051 
1052 	/* Don't allow non-TSO packets longer than MTU */
1053 	if (!is_tso_pkt(m)) {
1054 		eh = mtod(m, struct ether_header *);
1055 		if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1056 			 goto free_ret;
1057 	}
1058 
1059 	if(oce_tx_asic_stall_verify(sc, m)) {
1060 		m = oce_insert_vlan_tag(sc, m, &complete);
1061 		if(!m) {
1062 			device_printf(sc->dev, "Insertion unsuccessful\n");
1063 			return 0;
1064 		}
1065 	}
1066 
1067 	/* The Lancer and SH ASICs have a bug wherein packets that are 32
1068 	 * bytes or less may cause a transmit stall on that port. The
1069 	 * work-around is to pad such short packets to a 36-byte length.
1070 	 */
1071 	if(IS_SH(sc) || IS_XE201(sc) ) {
1072 		if(m->m_pkthdr.len <= 32) {
1073 			char buf[36];
1074 			bzero((void *)buf, 36);
1075 			m_append(m, (36 - m->m_pkthdr.len), buf);
1076 		}
1077 	}
1078 
1079 tx_start:
1080 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1081 		/* consolidate packet buffers for TSO/LSO segment offload */
1082 #if defined(INET6) || defined(INET)
1083 		m = oce_tso_setup(sc, mpp);
1084 #else
1085 		m = NULL;
1086 #endif
1087 		if (m == NULL) {
1088 			rc = ENXIO;
1089 			goto free_ret;
1090 		}
1091 	}
1092 
1093 	pd = &wq->pckts[wq->pkt_desc_head];
1094 
1095 retry:
1096 	rc = bus_dmamap_load_mbuf_sg(wq->tag,
1097 				     pd->map,
1098 				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
1099 	if (rc == 0) {
1100 		num_wqes = pd->nsegs + 1;
1101 		if (IS_BE(sc) || IS_SH(sc)) {
1102 			/* Dummy WQE is required only for BE3. */
1103 			if (num_wqes & 1)
1104 				num_wqes++;
1105 		}
1106 		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1107 			bus_dmamap_unload(wq->tag, pd->map);
1108 			return EBUSY;
1109 		}
1110 		atomic_store_rel_int(&wq->pkt_desc_head,
1111 				     (wq->pkt_desc_head + 1) % \
1112 				      OCE_WQ_PACKET_ARRAY_SIZE);
1113 		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1114 		pd->mbuf = m;
1115 
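		/* The first WQE is the NIC header: it carries the checksum,
		 * TSO and VLAN flags plus the total length; the fragment
		 * WQEs that follow carry the DMA segment addresses. */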
1116 		nichdr =
1117 		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1118 		nichdr->u0.dw[0] = 0;
1119 		nichdr->u0.dw[1] = 0;
1120 		nichdr->u0.dw[2] = 0;
1121 		nichdr->u0.dw[3] = 0;
1122 
1123 		nichdr->u0.s.complete = complete;
1124 		nichdr->u0.s.mgmt = os2bmc;
1125 		nichdr->u0.s.event = 1;
1126 		nichdr->u0.s.crc = 1;
1127 		nichdr->u0.s.forward = 0;
1128 		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1129 		nichdr->u0.s.udpcs =
1130 			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1131 		nichdr->u0.s.tcpcs =
1132 			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1133 		nichdr->u0.s.num_wqe = num_wqes;
1134 		nichdr->u0.s.total_length = m->m_pkthdr.len;
1135 
1136 		if (m->m_flags & M_VLANTAG) {
1137 			nichdr->u0.s.vlan = 1; /*Vlan present*/
1138 			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1139 		}
1140 
1141 		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1142 			if (m->m_pkthdr.tso_segsz) {
1143 				nichdr->u0.s.lso = 1;
1144 				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
1145 			}
1146 			if (!IS_BE(sc) || !IS_SH(sc))
1147 				nichdr->u0.s.ipcs = 1;
1148 		}
1149 
1150 		RING_PUT(wq->ring, 1);
1151 		atomic_add_int(&wq->ring->num_used, 1);
1152 
1153 		for (i = 0; i < pd->nsegs; i++) {
1154 			nicfrag =
1155 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1156 						      struct oce_nic_frag_wqe);
1157 			nicfrag->u0.s.rsvd0 = 0;
1158 			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1159 			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1160 			nicfrag->u0.s.frag_len = segs[i].ds_len;
1161 			pd->wqe_idx = wq->ring->pidx;
1162 			RING_PUT(wq->ring, 1);
1163 			atomic_add_int(&wq->ring->num_used, 1);
1164 		}
1165 		if (num_wqes > (pd->nsegs + 1)) {
1166 			nicfrag =
1167 			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
1168 						      struct oce_nic_frag_wqe);
1169 			nicfrag->u0.dw[0] = 0;
1170 			nicfrag->u0.dw[1] = 0;
1171 			nicfrag->u0.dw[2] = 0;
1172 			nicfrag->u0.dw[3] = 0;
1173 			pd->wqe_idx = wq->ring->pidx;
1174 			RING_PUT(wq->ring, 1);
1175 			atomic_add_int(&wq->ring->num_used, 1);
1176 			pd->nsegs++;
1177 		}
1178 
1179 		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1180 		wq->tx_stats.tx_reqs++;
1181 		wq->tx_stats.tx_wrbs += num_wqes;
1182 		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1183 		wq->tx_stats.tx_pkts++;
1184 
1185 		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1186 				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1187 		reg_value = (num_wqes << 16) | wq->wq_id;
1188 
1189 		/* If os2bmc is not enabled, or if the packet is already
1190 		   tagged as destined for the BMC, do nothing.
1191 		 */
1192 		oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
1193 
1194 		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1195 		if (m->m_flags & M_MCAST)
1196 			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
1197 		ETHER_BPF_MTAP(sc->ifp, m);
1198 
1199 		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1200 
1201 	} else if (rc == EFBIG)	{
1202 		if (retry_cnt == 0) {
1203 			m_temp = m_defrag(m, M_NOWAIT);
1204 			if (m_temp == NULL)
1205 				goto free_ret;
1206 			m = m_temp;
1207 			*mpp = m_temp;
1208 			retry_cnt = retry_cnt + 1;
1209 			goto retry;
1210 		} else
1211 			goto free_ret;
1212 	} else if (rc == ENOMEM)
1213 		return rc;
1214 	else
1215 		goto free_ret;
1216 
1217 	if (os2bmc) {
1218 		m = m_new;
1219 		goto tx_start;
1220 	}
1221 
1222 	return 0;
1223 
1224 free_ret:
1225 	m_freem(*mpp);
1226 	*mpp = NULL;
1227 	return rc;
1228 }
1229 
1230 static void
1231 oce_process_tx_completion(struct oce_wq *wq)
1232 {
1233 	struct oce_packet_desc *pd;
1234 	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1235 	struct mbuf *m;
1236 
1237 	pd = &wq->pckts[wq->pkt_desc_tail];
1238 	atomic_store_rel_int(&wq->pkt_desc_tail,
1239 			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1240 	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1241 	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1242 	bus_dmamap_unload(wq->tag, pd->map);
1243 
1244 	m = pd->mbuf;
1245 	m_freem(m);
1246 	pd->mbuf = NULL;
1247 
1248 	if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
1249 		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1250 			if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
1251 			oce_tx_restart(sc, wq);
1252 		}
1253 	}
1254 }
1255 
1256 static void
1257 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1258 {
1259 
1260 	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1261 		return;
1262 
1263 	if (!drbr_empty(sc->ifp, wq->br))
1264 		taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1265 
1266 }
1267 
1268 #if defined(INET6) || defined(INET)
1269 static struct mbuf *
1270 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1271 {
1272 	struct mbuf *m;
1273 #ifdef INET
1274 	struct ip *ip;
1275 #endif
1276 #ifdef INET6
1277 	struct ip6_hdr *ip6;
1278 #endif
1279 	struct ether_vlan_header *eh;
1280 	struct tcphdr *th;
1281 	uint16_t etype;
1282 	int total_len = 0, ehdrlen = 0;
1283 
1284 	m = *mpp;
1285 
1286 	if (M_WRITABLE(m) == 0) {
1287 		m = m_dup(*mpp, M_NOWAIT);
1288 		if (!m)
1289 			return NULL;
1290 		m_freem(*mpp);
1291 		*mpp = m;
1292 	}
1293 
1294 	eh = mtod(m, struct ether_vlan_header *);
1295 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1296 		etype = ntohs(eh->evl_proto);
1297 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1298 	} else {
1299 		etype = ntohs(eh->evl_encap_proto);
1300 		ehdrlen = ETHER_HDR_LEN;
1301 	}
1302 
1303 	switch (etype) {
1304 #ifdef INET
1305 	case ETHERTYPE_IP:
1306 		ip = (struct ip *)(m->m_data + ehdrlen);
1307 		if (ip->ip_p != IPPROTO_TCP)
1308 			return NULL;
1309 		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1310 
1311 		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1312 		break;
1313 #endif
1314 #ifdef INET6
1315 	case ETHERTYPE_IPV6:
1316 		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1317 		if (ip6->ip6_nxt != IPPROTO_TCP)
1318 			return NULL;
1319 		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1320 
1321 		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1322 		break;
1323 #endif
1324 	default:
1325 		return NULL;
1326 	}
1327 
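	/* Make the whole Ethernet/IP/TCP header chain contiguous in the
	 * first mbuf before the TSO WQEs are built from it. */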
1328 	m = m_pullup(m, total_len);
1329 	*mpp = m;
1330 	return m;
1331 }
1332 #endif /* INET6 || INET */
1333 
1334 void
1335 oce_tx_task(void *arg, int npending)
1336 {
1337 	struct oce_wq *wq = arg;
1338 	POCE_SOFTC sc = wq->parent;
1339 	if_t ifp = sc->ifp;
1340 	int rc = 0;
1341 
1342 	LOCK(&wq->tx_lock);
1343 	rc = oce_multiq_transmit(ifp, NULL, wq);
1344 	if (rc) {
1345 		device_printf(sc->dev,
1346 				"TX[%d] restart failed\n", wq->queue_index);
1347 	}
1348 	UNLOCK(&wq->tx_lock);
1349 }
1350 
1351 void
1352 oce_start(if_t ifp)
1353 {
1354 	POCE_SOFTC sc = if_getsoftc(ifp);
1355 	struct mbuf *m;
1356 	int rc = 0;
1357 	int def_q = 0; /* Default TX queue is 0 */
1358 
1359 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1360 			IFF_DRV_RUNNING)
1361 		return;
1362 
1363 	if (!sc->link_status)
1364 		return;
1365 
1366 	while (true) {
1367 		m = if_dequeue(sc->ifp);
1368 		if (m == NULL)
1369 			break;
1370 
1371 		LOCK(&sc->wq[def_q]->tx_lock);
1372 		rc = oce_tx(sc, &m, def_q);
1373 		UNLOCK(&sc->wq[def_q]->tx_lock);
1374 		if (rc) {
1375 			if (m != NULL) {
1376 				sc->wq[def_q]->tx_stats.tx_stops ++;
1377 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1378 				if_sendq_prepend(ifp, m);
1379 				m = NULL;
1380 			}
1381 			break;
1382 		}
1383 	}
1384 }
1385 
1386 /* Handle the Completion Queue for transmit */
1387 uint16_t
1388 oce_wq_handler(void *arg)
1389 {
1390 	struct oce_wq *wq = (struct oce_wq *)arg;
1391 	POCE_SOFTC sc = wq->parent;
1392 	struct oce_cq *cq = wq->cq;
1393 	struct oce_nic_tx_cqe *cqe;
1394 	int num_cqes = 0;
1395 
1396 	LOCK(&wq->tx_compl_lock);
1397 	bus_dmamap_sync(cq->ring->dma.tag,
1398 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1399 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1400 	while (cqe->u0.dw[3]) {
1401 		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1402 
1403 		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1404 		if (wq->ring->cidx >= wq->ring->num_items)
1405 			wq->ring->cidx -= wq->ring->num_items;
1406 
1407 		oce_process_tx_completion(wq);
1408 		wq->tx_stats.tx_compl++;
1409 		cqe->u0.dw[3] = 0;
1410 		RING_GET(cq->ring, 1);
1411 		bus_dmamap_sync(cq->ring->dma.tag,
1412 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1413 		cqe =
1414 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1415 		num_cqes++;
1416 	}
1417 
1418 	if (num_cqes)
1419 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1420 
1421 	UNLOCK(&wq->tx_compl_lock);
1422 	return num_cqes;
1423 }
1424 
1425 static int
1426 oce_multiq_transmit(if_t ifp, struct mbuf *m, struct oce_wq *wq)
1427 {
1428 	POCE_SOFTC sc = if_getsoftc(ifp);
1429 	int status = 0, queue_index = 0;
1430 	struct mbuf *next = NULL;
1431 	struct buf_ring *br = NULL;
1432 
1433 	br  = wq->br;
1434 	queue_index = wq->queue_index;
1435 
1436 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1437 		IFF_DRV_RUNNING) {
1438 		if (m != NULL)
1439 			status = drbr_enqueue(ifp, br, m);
1440 		return status;
1441 	}
1442 
1443 	if (m != NULL) {
1444 		if ((status = drbr_enqueue(ifp, br, m)) != 0)
1445 			return status;
1446 	}
1447 	while ((next = drbr_peek(ifp, br)) != NULL) {
1448 		if (oce_tx(sc, &next, queue_index)) {
1449 			if (next == NULL) {
1450 				drbr_advance(ifp, br);
1451 			} else {
1452 				drbr_putback(ifp, br, next);
1453 				wq->tx_stats.tx_stops ++;
1454 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1455 			}
1456 			break;
1457 		}
1458 		drbr_advance(ifp, br);
1459 	}
1460 
1461 	return 0;
1462 }
1463 
1464 /*****************************************************************************
1465  *			    Receive routines				     *
1466  *****************************************************************************/
1467 
1468 static void
1469 oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
1470 {
1471 	uint32_t *p;
1472         struct ether_header *eh = NULL;
1473         struct tcphdr *tcp_hdr = NULL;
1474         struct ip *ip4_hdr = NULL;
1475         struct ip6_hdr *ip6 = NULL;
1476         uint32_t payload_len = 0;
1477 
1478         eh = mtod(m, struct ether_header *);
1479         /* correct IP header */
1480         if(!cqe2->ipv6_frame) {
1481 		ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
1482                 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1483                 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1484                 tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
1485         }else {
1486         	ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
1487                 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1488                 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1489                                                 - sizeof(struct ip6_hdr);
1490                 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1491                 tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
1492         }
1493 
1494         /* correct tcp header */
1495         tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1496         if(cqe2->push) {
1497 		tcp_set_flags(tcp_hdr, tcp_get_flags(tcp_hdr) | TH_PUSH);
1498         }
1499         tcp_hdr->th_win = htons(cqe2->tcp_window);
1500         tcp_hdr->th_sum = 0xffff;
1501         if(cqe2->ts_opt) {
1502                 p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
1503                 *p = cqe1->tcp_timestamp_val;
1504                 *(p+1) = cqe1->tcp_timestamp_ecr;
1505         }
1506 
1507 	return;
1508 }
1509 
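/*
 * Walk the RX ring, unmapping one posted buffer per fragment and chaining
 * the mbufs into a single packet; the first mbuf receives the pkthdr
 * length and the checksum flags derived from the completion entry.
 */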
1510 static void
1511 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1512 {
1513 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1514         uint32_t i = 0, frag_len = 0;
1515 	uint32_t len = cqe_info->pkt_size;
1516         struct oce_packet_desc *pd;
1517         struct mbuf *tail = NULL;
1518 
1519         for (i = 0; i < cqe_info->num_frags; i++) {
1520                 if (rq->ring->cidx == rq->ring->pidx) {
1521                         device_printf(sc->dev,
1522                                   "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1523                         return;
1524                 }
1525                 pd = &rq->pckts[rq->ring->cidx];
1526 
1527                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1528                 bus_dmamap_unload(rq->tag, pd->map);
1529 		RING_GET(rq->ring, 1);
1530                 rq->pending--;
1531 
1532                 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1533                 pd->mbuf->m_len = frag_len;
1534 
1535                 if (tail != NULL) {
1536                         /* additional fragments */
1537                         pd->mbuf->m_flags &= ~M_PKTHDR;
1538                         tail->m_next = pd->mbuf;
1539 			if(rq->islro)
1540                         	tail->m_nextpkt = NULL;
1541                         tail = pd->mbuf;
1542                 } else {
1543                         /* first fragment, fill out much of the packet header */
1544                         pd->mbuf->m_pkthdr.len = len;
1545 			if(rq->islro)
1546                         	pd->mbuf->m_nextpkt = NULL;
1547                         pd->mbuf->m_pkthdr.csum_flags = 0;
1548                         if (IF_CSUM_ENABLED(sc)) {
1549                                 if (cqe_info->l4_cksum_pass) {
1550                                         if(!cqe_info->ipv6_frame) { /* IPV4 */
1551                                                 pd->mbuf->m_pkthdr.csum_flags |=
1552                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1553                                         }else { /* IPV6 frame */
1554 						if(rq->islro) {
1555                                                 	pd->mbuf->m_pkthdr.csum_flags |=
1556                                                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1557 						}
1558                                         }
1559                                         pd->mbuf->m_pkthdr.csum_data = 0xffff;
1560                                 }
1561                                 if (cqe_info->ip_cksum_pass) {
1562                                         pd->mbuf->m_pkthdr.csum_flags |=
1563                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
1564                                 }
1565                         }
1566                         *m = tail = pd->mbuf;
1567                }
1568                 pd->mbuf = NULL;
1569                 len -= frag_len;
1570         }
1571 
1572         return;
1573 }
1574 
1575 static void
1576 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1577 {
1578         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1579         struct nic_hwlro_cqe_part1 *cqe1 = NULL;
1580         struct mbuf *m = NULL;
1581 	struct oce_common_cqe_info cq_info;
1582 
1583 	/* parse cqe */
1584         if(cqe2 == NULL) {
1585                 cq_info.pkt_size =  cqe->pkt_size;
1586                 cq_info.vtag = cqe->vlan_tag;
1587                 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1588                 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1589                 cq_info.ipv6_frame = cqe->ipv6_frame;
1590                 cq_info.vtp = cqe->vtp;
1591                 cq_info.qnq = cqe->qnq;
1592         }else {
1593                 cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
1594                 cq_info.pkt_size =  cqe2->coalesced_size;
1595                 cq_info.vtag = cqe2->vlan_tag;
1596                 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1597                 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1598                 cq_info.ipv6_frame = cqe2->ipv6_frame;
1599                 cq_info.vtp = cqe2->vtp;
1600                 cq_info.qnq = cqe1->qnq;
1601         }
1602 
1603 	cq_info.vtag = BSWAP_16(cq_info.vtag);
1604 
1605         cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1606         if(cq_info.pkt_size % rq->cfg.frag_size)
1607                 cq_info.num_frags++;
1608 
1609 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1610 
1611 	if (m) {
1612 		if(cqe2) {
1613 			//assert(cqe2->valid != 0);
1614 
1615 			//assert(cqe2->cqe_type != 2);
1616 			oce_correct_header(m, cqe1, cqe2);
1617 		}
1618 
1619 		m->m_pkthdr.rcvif = sc->ifp;
1620 		if (rq->queue_index)
1621 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1622 		else
1623 			m->m_pkthdr.flowid = rq->queue_index;
1624 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1625 
1626 		/* This determines if the VLAN tag is valid */
1627 		if (cq_info.vtp) {
1628 			if (sc->function_mode & FNM_FLEX10_MODE) {
1629 				/* FLEX10. If QnQ is not set, neglect VLAN */
1630 				if (cq_info.qnq) {
1631 					m->m_pkthdr.ether_vtag = cq_info.vtag;
1632 					m->m_flags |= M_VLANTAG;
1633 				}
1634 			} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK))  {
1635 				/* In UMC mode the PVID is generally stripped
1636 				   by the hw, but in some cases frames still
1637 				   arrive with it. So if pvid == vlan, ignore
1638 				   the vlan. */
1639 				m->m_pkthdr.ether_vtag = cq_info.vtag;
1640 				m->m_flags |= M_VLANTAG;
1641 			}
1642 		}
1643 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1644 
1645 		if_input(sc->ifp, m);
1646 
1647 		/* Update rx stats per queue */
1648 		rq->rx_stats.rx_pkts++;
1649 		rq->rx_stats.rx_bytes += cq_info.pkt_size;
1650 		rq->rx_stats.rx_frags += cq_info.num_frags;
1651 		rq->rx_stats.rx_ucast_pkts++;
1652 	}
1653         return;
1654 }
1655 
1656 static void
1657 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1658 {
1659 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1660 	int len;
1661 	struct mbuf *m = NULL;
1662 	struct oce_common_cqe_info cq_info;
1663 	uint16_t vtag = 0;
1664 
1665 	/* Is it a flush completion with no data? */
1666 	if(!cqe->u0.s.num_fragments)
1667 		goto exit;
1668 
1669 	len = cqe->u0.s.pkt_size;
1670 	if (!len) {
1671 		/* Partial DMA workaround for Lancer */
1672 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1673 		goto exit;
1674 	}
1675 
1676 	if (!oce_cqe_portid_valid(sc, cqe)) {
1677 		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1678 		goto exit;
1679 	}
1680 
1681 	 /* Get vlan_tag value */
1682 	if(IS_BE(sc) || IS_SH(sc))
1683 		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1684 	else
1685 		vtag = cqe->u0.s.vlan_tag;
1686 
1687 	cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1688 	cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1689 	cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1690 	cq_info.num_frags = cqe->u0.s.num_fragments;
1691 	cq_info.pkt_size = cqe->u0.s.pkt_size;
1692 
1693 	oce_rx_mbuf_chain(rq, &cq_info, &m);
1694 
1695 	if (m) {
1696 		m->m_pkthdr.rcvif = sc->ifp;
1697 		if (rq->queue_index)
1698 			m->m_pkthdr.flowid = (rq->queue_index - 1);
1699 		else
1700 			m->m_pkthdr.flowid = rq->queue_index;
1701 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1702 
1703 		/* This determines if the VLAN tag is valid */
1704 		if (oce_cqe_vtp_valid(sc, cqe)) {
1705 			if (sc->function_mode & FNM_FLEX10_MODE) {
1706 				/* FLEX10. If QnQ is not set, neglect VLAN */
1707 				if (cqe->u0.s.qnq) {
1708 					m->m_pkthdr.ether_vtag = vtag;
1709 					m->m_flags |= M_VLANTAG;
1710 				}
1711 			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
1712 				/* In UMC mode the PVID is generally stripped
1713 				   by the hw, but in some cases frames still
1714 				   arrive with it. So if pvid == vlan, ignore
1715 				   the vlan. */
1716 				m->m_pkthdr.ether_vtag = vtag;
1717 				m->m_flags |= M_VLANTAG;
1718 			}
1719 		}
1720 
1721 		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1722 #if defined(INET6) || defined(INET)
1723 		/* Try to queue to LRO */
1724 		if (IF_LRO_ENABLED(sc) &&
1725 		    (cqe->u0.s.ip_cksum_pass) &&
1726 		    (cqe->u0.s.l4_cksum_pass) &&
1727 		    (!cqe->u0.s.ip_ver)       &&
1728 		    (rq->lro.lro_cnt != 0)) {
1729 			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1730 				rq->lro_pkts_queued ++;
1731 				goto post_done;
1732 			}
1733 			/* If LRO posting fails then try to post to STACK */
1734 		}
1735 #endif
1736 
1737 		if_input(sc->ifp, m);
1738 #if defined(INET6) || defined(INET)
1739 post_done:
1740 #endif
1741 		/* Update rx stats per queue */
1742 		rq->rx_stats.rx_pkts++;
1743 		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1744 		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1745 		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1746 			rq->rx_stats.rx_mcast_pkts++;
1747 		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1748 			rq->rx_stats.rx_ucast_pkts++;
1749 	}
1750 exit:
1751 	return;
1752 }
1753 
1754 void
1755 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1756 {
1757 	uint32_t i = 0;
1758 	struct oce_packet_desc *pd;
1759 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1760 
1761 	for (i = 0; i < num_frags; i++) {
1762                 if (rq->ring->cidx == rq->ring->pidx) {
1763                         device_printf(sc->dev,
1764                                 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1765                         return;
1766                 }
1767                 pd = &rq->pckts[rq->ring->cidx];
1768                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1769                 bus_dmamap_unload(rq->tag, pd->map);
1770                 if (pd->mbuf != NULL) {
1771                         m_freem(pd->mbuf);
1772                         pd->mbuf = NULL;
1773                 }
1774 
1775 		RING_GET(rq->ring, 1);
1776                 rq->pending--;
1777 	}
1778 }
1779 
1780 static int
1781 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1782 {
1783 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1784 	int vtp = 0;
1785 
1786 	if (sc->be3_native) {
1787 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1788 		vtp =  cqe_v1->u0.s.vlan_tag_present;
1789 	} else
1790 		vtp = cqe->u0.s.vlan_tag_present;
1791 
1792 	return vtp;
1793 
1794 }
1795 
1796 static int
1797 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1798 {
1799 	struct oce_nic_rx_cqe_v1 *cqe_v1;
1800 	int port_id = 0;
1801 
1802 	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1803 		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1804 		port_id =  cqe_v1->u0.s.port;
1805 		if (sc->port_id != port_id)
1806 			return 0;
1807 	} else
1808 		; /* For BE3 legacy and Lancer this check is a no-op */
1809 
1810 	return 1;
1811 
1812 }
1813 
1814 #if defined(INET6) || defined(INET)
1815 void
1816 oce_rx_flush_lro(struct oce_rq *rq)
1817 {
1818 	struct lro_ctrl	*lro = &rq->lro;
1819 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1820 
1821 	if (!IF_LRO_ENABLED(sc))
1822 		return;
1823 
1824 	tcp_lro_flush_all(lro);
1825 	rq->lro_pkts_queued = 0;
1826 
1827 	return;
1828 }
1829 
1830 static int
1831 oce_init_lro(POCE_SOFTC sc)
1832 {
1833 	struct lro_ctrl *lro = NULL;
1834 	int i = 0, rc = 0;
1835 
1836 	for (i = 0; i < sc->nrqs; i++) {
1837 		lro = &sc->rq[i]->lro;
1838 		rc = tcp_lro_init(lro);
1839 		if (rc != 0) {
1840 			device_printf(sc->dev, "LRO init failed\n");
1841 			return rc;
1842 		}
1843 		lro->ifp = sc->ifp;
1844 	}
1845 
1846 	return rc;
1847 }
1848 
1849 void
1850 oce_free_lro(POCE_SOFTC sc)
1851 {
1852 	struct lro_ctrl *lro = NULL;
1853 	int i = 0;
1854 
1855 	for (i = 0; i < sc->nrqs; i++) {
1856 		lro = &sc->rq[i]->lro;
1857 		if (lro)
1858 			tcp_lro_free(lro);
1859 	}
1860 }
1861 #endif
1862 
1863 int
1864 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1865 {
1866 	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1867 	int i, rc;
1868 	struct oce_packet_desc *pd;
1869 	bus_dma_segment_t segs[6];
1870 	int nsegs, added = 0;
1871 	struct oce_nic_rqe *rqe;
1872 	pd_rxulp_db_t rxdb_reg;
1873 	uint32_t val = 0;
1874 	uint32_t oce_max_rq_posts = 64;
1875 
1876 	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1877 	for (i = 0; i < count; i++) {
1878 		pd = &rq->pckts[rq->ring->pidx];
1879 		pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1880 		if (pd->mbuf == NULL) {
1881 			device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
1882 			break;
1883 		}
1884 		pd->mbuf->m_nextpkt = NULL;
1885 
1886 		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1887 
1888 		rc = bus_dmamap_load_mbuf_sg(rq->tag,
1889 					     pd->map,
1890 					     pd->mbuf,
1891 					     segs, &nsegs, BUS_DMA_NOWAIT);
1892 		if (rc) {
1893 			m_free(pd->mbuf);
1894 			device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1895 			break;
1896 		}
1897 
1898 		if (nsegs != 1) {
1899 			i--;
1900 			continue;
1901 		}
1902 
1903 		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1904 
1905 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1906 		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1907 		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1908 		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1909 		RING_PUT(rq->ring, 1);
1910 		added++;
1911 		rq->pending++;
1912 	}
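	/*
	 * Tell the hardware how many buffers were posted. The doorbell can
	 * only announce up to oce_max_rq_posts entries per write, so flush
	 * "added" in chunks; HWLRO rings pack the RQ id and count into a
	 * single 32-bit doorbell word instead of the PD_RXULP_DB format.
	 */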
1913 	oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
1914 	if (added != 0) {
1915 		for (i = added / oce_max_rq_posts; i > 0; i--) {
1916 			rxdb_reg.bits.num_posted = oce_max_rq_posts;
1917 			rxdb_reg.bits.qid = rq->rq_id;
1918 			if(rq->islro) {
1919                                 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1920                                 val |= oce_max_rq_posts << 16;
1921                                 OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1922 			}else {
1923 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1924 			}
1925 			added -= oce_max_rq_posts;
1926 		}
1927 		if (added > 0) {
1928 			rxdb_reg.bits.qid = rq->rq_id;
1929 			rxdb_reg.bits.num_posted = added;
1930 			if(rq->islro) {
1931                                 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1932                                 val |= added << 16;
1933                                 OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
1934 			}else {
1935 				OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1936 			}
1937 		}
1938 	}
1939 
1940 	return 0;
1941 }
1942 
1943 static void
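/*
 * Re-arm the RQ's completion queue and top the ring back up: standard
 * rings are refilled to one below capacity, while HWLRO rings are
 * refilled 64 buffers at a time once enough slots are free.
 */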
1944 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1945 {
1946         if (num_cqes) {
1947                 oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1948 		if(!sc->enable_hwlro) {
1949 			if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1950 				oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1951 		}else {
1952                 	if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
1953                         	oce_alloc_rx_bufs(rq, 64);
1954         	}
1955 	}
1956 
1957         return;
1958 }
1959 
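/*
 * HWLRO completion handler. The hardware emits either a singleton CQE
 * (type 0) describing a whole aggregated packet, or a part1/part2 pair
 * (types 1 and 2) that must be matched up; rq->cqe_firstpart holds a
 * pending part1 until its part2 arrives.
 */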
1960 uint16_t
1961 oce_rq_handler_lro(void *arg)
1962 {
1963         struct oce_rq *rq = (struct oce_rq *)arg;
1964         struct oce_cq *cq = rq->cq;
1965         POCE_SOFTC sc = rq->parent;
1966         struct nic_hwlro_singleton_cqe *cqe;
1967         struct nic_hwlro_cqe_part2 *cqe2;
1968         int num_cqes = 0;
1969 
1970 	LOCK(&rq->rx_lock);
1971         bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1972         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
1973         while (cqe->valid) {
1974                 if(cqe->cqe_type == 0) { /* singleton cqe */
1975 			/* we should not get singleton cqe after cqe1 on same rq */
1976 			if(rq->cqe_firstpart != NULL) {
1977 				device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
1978 				goto exit_rq_handler_lro;
1979 			}
1980                         if(cqe->error != 0) {
1981                                 rq->rx_stats.rxcp_err++;
1982 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1983                         }
1984                         oce_rx_lro(rq, cqe, NULL);
1985                         rq->rx_stats.rx_compl++;
1986                         cqe->valid = 0;
1987                         RING_GET(cq->ring, 1);
1988                         num_cqes++;
1989                         if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1990                                 break;
1991                 }else if(cqe->cqe_type == 0x1) { /* first part */
1992 			/* we should not get cqe1 after cqe1 on same rq */
1993 			if(rq->cqe_firstpart != NULL) {
1994 				device_printf(sc->dev, "Got cqe1 after cqe1 \n");
1995 				goto exit_rq_handler_lro;
1996 			}
1997 			rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
1998                         RING_GET(cq->ring, 1);
1999                 }else if(cqe->cqe_type == 0x2) { /* second part */
2000 			cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
2001                         if(cqe2->error != 0) {
2002                                 rq->rx_stats.rxcp_err++;
2003 				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2004                         }
2005 			/* We should not get cqe2 without cqe1 */
2006 			if(rq->cqe_firstpart == NULL) {
2007 				device_printf(sc->dev, "Got cqe2 without cqe1 \n");
2008 				goto exit_rq_handler_lro;
2009 			}
2010                         oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2011 
2012                         rq->rx_stats.rx_compl++;
2013                         rq->cqe_firstpart->valid = 0;
2014                         cqe2->valid = 0;
2015 			rq->cqe_firstpart = NULL;
2016 
2017                         RING_GET(cq->ring, 1);
2018                         num_cqes += 2;
2019                         if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2020                                 break;
2021 		}
2022 
2023                 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2024                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2025         }
2026 	oce_check_rx_bufs(sc, num_cqes, rq);
2027 exit_rq_handler_lro:
2028 	UNLOCK(&rq->rx_lock);
2029 	return 0;
2030 }
2031 
2032 /* Handle the Completion Queue for receive */
2033 uint16_t
2034 oce_rq_handler(void *arg)
2035 {
2036 	struct epoch_tracker et;
2037 	struct oce_rq *rq = (struct oce_rq *)arg;
2038 	struct oce_cq *cq = rq->cq;
2039 	POCE_SOFTC sc = rq->parent;
2040 	struct oce_nic_rx_cqe *cqe;
2041 	int num_cqes = 0;
2042 
2043 	NET_EPOCH_ENTER(et);
2044 	if(rq->islro) {
2045 		oce_rq_handler_lro(arg);
2046 		NET_EPOCH_EXIT(et);
2047 		return 0;
2048 	}
2049 	LOCK(&rq->rx_lock);
2050 	bus_dmamap_sync(cq->ring->dma.tag,
2051 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2052 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
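	/*
	 * A completion is valid while any bit of dw[2] is set; it is
	 * zeroed after processing so the slot can be reused on the next
	 * pass around the ring.
	 */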
2053 	while (cqe->u0.dw[2]) {
2054 		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
2055 
2056 		if (cqe->u0.s.error == 0) {
2057 			oce_rx(rq, cqe);
2058 		} else {
2059 			rq->rx_stats.rxcp_err++;
2060 			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2061 			/* Post L3/L4 errors to stack.*/
2062 			oce_rx(rq, cqe);
2063 		}
2064 		rq->rx_stats.rx_compl++;
2065 		cqe->u0.dw[2] = 0;
2066 
2067 #if defined(INET6) || defined(INET)
2068 		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2069 			oce_rx_flush_lro(rq);
2070 		}
2071 #endif
2072 
2073 		RING_GET(cq->ring, 1);
2074 		bus_dmamap_sync(cq->ring->dma.tag,
2075 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2076 		cqe =
2077 		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2078 		num_cqes++;
2079 		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
2080 			break;
2081 	}
2082 
2083 #if defined(INET6) || defined(INET)
2084         if (IF_LRO_ENABLED(sc))
2085                 oce_rx_flush_lro(rq);
2086 #endif
2087 
2088 	oce_check_rx_bufs(sc, num_cqes, rq);
2089 	UNLOCK(&rq->rx_lock);
2090 	NET_EPOCH_EXIT(et);
2091 	return 0;
2092 
2093 }
2094 
2095 /*****************************************************************************
2096  *		   Helper function prototypes in this file 		     *
2097  *****************************************************************************/
2098 
2099 static void
2100 oce_attach_ifp(POCE_SOFTC sc)
2101 {
2102 
2103 	sc->ifp = if_alloc(IFT_ETHER);
2104 
2105 	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2106 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2107 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2108 
2109 	if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST);
2110 	if_setioctlfn(sc->ifp, oce_ioctl);
2111 	if_setstartfn(sc->ifp, oce_start);
2112 	if_setinitfn(sc->ifp, oce_init);
2113 	if_setmtu(sc->ifp, ETHERMTU);
2114 	if_setsoftc(sc->ifp, sc);
2115 	if_settransmitfn(sc->ifp, oce_multiq_start);
2116 	if_setqflushfn(sc->ifp, oce_multiq_flush);
2117 
2118 	if_initname(sc->ifp,
2119 		    device_get_name(sc->dev), device_get_unit(sc->dev));
2120 
2121 	if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
2122 	if_setsendqready(sc->ifp);
2123 
2124 	if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
2125 	if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
2126 	if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
2127 
2128 	if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
2129 	if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
2130 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);
2131 
2132 #if defined(INET6) || defined(INET)
2133 	if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
2134 	if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
2135 	if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
2136 #endif
2137 
2138 	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
2139 	if_setbaudrate(sc->ifp, IF_Gbps(10));
2140 
2141 	if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2142 	if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
2143 	if_sethwtsomaxsegsize(sc->ifp, 4096);
2144 
2145 	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2146 }
2147 
2148 static void
2149 oce_add_vlan(void *arg, if_t ifp, uint16_t vtag)
2150 {
2151 	POCE_SOFTC sc = if_getsoftc(ifp);
2152 
2153 	if (if_getsoftc(ifp) !=  arg)
2154 		return;
2155 	if ((vtag == 0) || (vtag > 4095))
2156 		return;
2157 
2158 	sc->vlan_tag[vtag] = 1;
2159 	sc->vlans_added++;
2160 	if (sc->vlans_added <= (sc->max_vlans + 1))
2161 		oce_vid_config(sc);
2162 }
2163 
2164 static void
2165 oce_del_vlan(void *arg, if_t ifp, uint16_t vtag)
2166 {
2167 	POCE_SOFTC sc = if_getsoftc(ifp);
2168 
2169 	if (if_getsoftc(ifp) !=  arg)
2170 		return;
2171 	if ((vtag == 0) || (vtag > 4095))
2172 		return;
2173 
2174 	sc->vlan_tag[vtag] = 0;
2175 	sc->vlans_added--;
2176 	oce_vid_config(sc);
2177 }
2178 
2179 /*
2180  * A max of 64 vlans can be configured in BE. If the user configures
2181  * more, place the card in vlan promiscuous mode.
2182  */
2183 static int
2184 oce_vid_config(POCE_SOFTC sc)
2185 {
2186 	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
2187 	uint16_t ntags = 0, i;
2188 	int status = 0;
2189 
2190 	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2191 			(if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
2192 		for (i = 0; i < MAX_VLANS; i++) {
2193 			if (sc->vlan_tag[i]) {
2194 				vtags[ntags].vtag = i;
2195 				ntags++;
2196 			}
2197 		}
2198 		if (ntags)
2199 			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2200 						vtags, ntags, 1, 0);
2201 	} else
2202 		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2203 					 	NULL, 0, 1, 1);
2204 	return status;
2205 }
2206 
2207 static void
2208 oce_mac_addr_set(POCE_SOFTC sc)
2209 {
2210 	uint32_t old_pmac_id = sc->pmac_id;
2211 	int status = 0;
2212 
2213 	status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2214 			 sc->macaddr.size_of_struct);
2215 	if (!status)
2216 		return;
2217 
2218 	status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
2219 					sc->if_id, &sc->pmac_id);
2220 	if (!status) {
2221 		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2222 		bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2223 				 sc->macaddr.size_of_struct);
2224 	}
2225 	if (status)
2226 		device_printf(sc->dev, "Failed update macaddress\n");
2227 
2228 }
2229 
2230 static int
2231 oce_handle_passthrough(if_t ifp, caddr_t data)
2232 {
2233 	POCE_SOFTC sc = if_getsoftc(ifp);
2234 	struct ifreq *ifr = (struct ifreq *)data;
2235 	int rc = ENXIO;
2236 	char cookie[32] = {0};
2237 	void *priv_data = ifr_data_get_ptr(ifr);
2238 	void *ioctl_ptr;
2239 	uint32_t req_size;
2240 	struct mbx_hdr req;
2241 	OCE_DMA_MEM dma_mem;
2242 
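	/*
	 * The user buffer starts with the IOCTL_COOKIE signature, followed
	 * by a firmware mailbox header and its payload. The request is
	 * bounced through DMA-able memory, handed to the firmware mailbox,
	 * and the response is copied back to user space.
	 */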
2243 	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
2244 		return EFAULT;
2245 
2246 	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
2247 		return EINVAL;
2248 
2249 	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
2250 	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
2251 		return EFAULT;
2252 
2253 	req_size = le32toh(req.u0.req.request_length);
2254 	if (req_size > 65536)
2255 		return EINVAL;
2256 
2257 	req_size += sizeof(struct mbx_hdr);
2258 	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
2259 	if (rc)
2260 		return ENOMEM;
2261 
2262 	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
2263 		rc = EFAULT;
2264 		goto dma_free;
2265 	}
2266 
2267 	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
2268 	if (rc) {
2269 		rc = EIO;
2270 		goto dma_free;
2271 	}
2272 
2273 	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) {
2274 		rc =  EFAULT;
2275 		goto dma_free;
2276 	}
2277 
2278 	/*
2279 	   The firmware fills in every attribute for this ioctl except
2280 	   the driver version, so fill that in here.
2281 	 */
2282 	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
2283 		struct mbx_common_get_cntl_attr *fw_cmd =
2284 		    (struct mbx_common_get_cntl_attr *)ioctl_ptr;
2285 		_Static_assert(sizeof(COMPONENT_REVISION) <=
2286 		     sizeof(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str),
2287 		     "driver version string too long");
2288 
2289 		rc = copyout(COMPONENT_REVISION,
2290 		    fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2291 		    sizeof(COMPONENT_REVISION));
2292 	}
2293 
2294 dma_free:
2295 	oce_dma_free(sc, &dma_mem);
2296 	return rc;
2297 
2298 }
2299 
2300 static void
2301 oce_eqd_set_periodic(POCE_SOFTC sc)
2302 {
2303 	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
2304 	struct oce_aic_obj *aic;
2305 	struct oce_eq *eqo;
2306 	uint64_t now = 0, delta;
2307 	int eqd, i, num = 0;
2308 	uint32_t tx_reqs = 0, rxpkts = 0, pps;
2309 	struct oce_wq *wq;
2310 	struct oce_rq *rq;
2311 
2312 	#define ticks_to_msecs(t)       (1000 * (t) / hz)
2313 
2314 	for (i = 0 ; i < sc->neqs; i++) {
2315 		eqo = sc->eq[i];
2316 		aic = &sc->aic_obj[i];
2317 		/* A static EQ delay has been set from user space; apply it. */
2318 		if (!aic->enable) {
2319 			if (aic->ticks)
2320 				aic->ticks = 0;
2321 			eqd = aic->et_eqd;
2322 			goto modify_eqd;
2323 		}
2324 
2325 		if (i == 0) {
2326 			rq = sc->rq[0];
2327 			rxpkts = rq->rx_stats.rx_pkts;
2328 		} else
2329 			rxpkts = 0;
2330 		if (i + 1 < sc->nrqs) {
2331 			rq = sc->rq[i + 1];
2332 			rxpkts += rq->rx_stats.rx_pkts;
2333 		}
2334 		if (i < sc->nwqs) {
2335 			wq = sc->wq[i];
2336 			tx_reqs = wq->tx_stats.tx_reqs;
2337 		} else
2338 			tx_reqs = 0;
2339 		now = ticks;
2340 
2341 		if (!aic->ticks || now < aic->ticks ||
2342 		    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2343 			aic->prev_rxpkts = rxpkts;
2344 			aic->prev_txreqs = tx_reqs;
2345 			aic->ticks = now;
2346 			continue;
2347 		}
2348 
2349 		delta = ticks_to_msecs(now - aic->ticks);
2350 
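		/*
		 * Adaptive coalescing heuristic: combine the RX and TX
		 * packet rates (in pkts/sec) and map roughly every 15K pps
		 * to four delay units; rates below ~30K pps (eqd < 8)
		 * disable coalescing entirely.
		 */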
2351 		pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2352 		      (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2353 		eqd = (pps / 15000) << 2;
2354 		if (eqd < 8)
2355 			eqd = 0;
2356 
2357 		/* Make sure that the eq delay is in the known range */
2358 		eqd = min(eqd, aic->max_eqd);
2359 		eqd = max(eqd, aic->min_eqd);
2360 
2361 		aic->prev_rxpkts = rxpkts;
2362 		aic->prev_txreqs = tx_reqs;
2363 		aic->ticks = now;
2364 
2365 modify_eqd:
2366 		if (eqd != aic->cur_eqd) {
2367 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2368 			set_eqd[num].eq_id = eqo->eq_id;
2369 			aic->cur_eqd = eqd;
2370 			num++;
2371 		}
2372 	}
2373 
2374 	/* Is there at least one EQ that needs to be modified? */
2375         for(i = 0; i < num; i += 8) {
2376                 if((num - i) >=8 )
2377                         oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
2378                 else
2379                         oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2380         }
2381 
2382 }
2383 
2384 static void oce_detect_hw_error(POCE_SOFTC sc)
2385 {
2386 
2387 	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
2388 	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2389 	uint32_t i;
2390 
2391 	if (sc->hw_error)
2392 		return;
2393 
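	/*
	 * Lancer (XE201) reports faults through the SLIPORT status/error
	 * registers; BE parts instead expose unrecoverable-error (UE) bits
	 * in paired low/high CSRs, filtered through their mask registers.
	 */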
2394 	if (IS_XE201(sc)) {
2395 		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
2396 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2397 			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
2398 			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
2399 		}
2400 	} else {
2401 		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
2402 		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
2403 		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
2404 		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
2405 
2406 		ue_low = (ue_low & ~ue_low_mask);
2407 		ue_high = (ue_high & ~ue_high_mask);
2408 	}
2409 
2410 	/* On certain platforms BE hardware can report spurious UEs.
2411 	 * On a real UE the h/w will stop working completely anyway,
2412 	 * so hw_error is deliberately not set on UE detection alone.
2413 	 */
2414 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2415 		sc->hw_error = TRUE;
2416 		device_printf(sc->dev, "Error detected in the card\n");
2417 	}
2418 
2419 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2420 		device_printf(sc->dev,
2421 				"ERR: sliport status 0x%x\n", sliport_status);
2422 		device_printf(sc->dev,
2423 				"ERR: sliport error1 0x%x\n", sliport_err1);
2424 		device_printf(sc->dev,
2425 				"ERR: sliport error2 0x%x\n", sliport_err2);
2426 	}
2427 
2428 	if (ue_low) {
2429 		for (i = 0; ue_low; ue_low >>= 1, i++) {
2430 			if (ue_low & 1)
2431 				device_printf(sc->dev, "UE: %s bit set\n",
2432 							ue_status_low_desc[i]);
2433 		}
2434 	}
2435 
2436 	if (ue_high) {
2437 		for (i = 0; ue_high; ue_high >>= 1, i++) {
2438 			if (ue_high & 1)
2439 				device_printf(sc->dev, "UE: %s bit set\n",
2440 							ue_status_hi_desc[i]);
2441 		}
2442 	}
2443 
2444 }
2445 
2446 static void
2447 oce_local_timer(void *arg)
2448 {
2449 	POCE_SOFTC sc = arg;
2450 	int i = 0;
2451 
2452 	oce_detect_hw_error(sc);
2453 	oce_refresh_nic_stats(sc);
2454 	oce_refresh_queue_stats(sc);
2455 	oce_mac_addr_set(sc);
2456 
2457 	/* TX watchdog */
2458 	for (i = 0; i < sc->nwqs; i++)
2459 		oce_tx_restart(sc, sc->wq[i]);
2460 
2461 	/* calculate and set the eq delay for optimal interrupt rate */
2462 	if (IS_BE(sc) || IS_SH(sc))
2463 		oce_eqd_set_periodic(sc);
2464 
2465 	callout_reset(&sc->timer, hz, oce_local_timer, sc);
2466 }
2467 
2468 static void
2469 oce_tx_compl_clean(POCE_SOFTC sc)
2470 {
2471 	struct oce_wq *wq;
2472 	int i = 0, timeo = 0, num_wqes = 0;
2473 	int pending_txqs = sc->nwqs;
2474 
2475 	/* Stop polling for completions when the HW has been silent for
2476 	 * 10ms, a hw_error is flagged, or no outstanding completions remain.
2477 	 */
2478 	do {
2479 		pending_txqs = sc->nwqs;
2480 
2481 		for_all_wq_queues(sc, wq, i) {
2482 			num_wqes = oce_wq_handler(wq);
2483 
2484 			if(num_wqes)
2485 				timeo = 0;
2486 
2487 			if(!wq->ring->num_used)
2488 				pending_txqs--;
2489 		}
2490 
2491 		if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2492 			break;
2493 
2494 		DELAY(1000);
2495 	} while (TRUE);
2496 
2497 	for_all_wq_queues(sc, wq, i) {
2498 		while(wq->ring->num_used) {
2499 			LOCK(&wq->tx_compl_lock);
2500 			oce_process_tx_completion(wq);
2501 			UNLOCK(&wq->tx_compl_lock);
2502 		}
2503 	}
2504 
2505 }
2506 
2507 /* NOTE : This should only be called holding
2508  *        DEVICE_LOCK.
2509  */
2510 static void
2511 oce_if_deactivate(POCE_SOFTC sc)
2512 {
2513 	int i;
2514 	struct oce_rq *rq;
2515 	struct oce_wq *wq;
2516 	struct oce_eq *eq;
2517 
2518 	if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2519 
2520 	oce_tx_compl_clean(sc);
2521 
2522 	/* Stop intrs and finish any bottom halves pending */
2523 	oce_hw_intr_disable(sc);
2524 
2525 	/* Since taskqueue_drain takes the Giant Lock, we should not hold
2526 	   any other lock. So unlock the device lock and reacquire it after
2527 	   taskqueue_drain completes.
2528 	*/
2529 	UNLOCK(&sc->dev_lock);
2530 	for (i = 0; i < sc->intr_count; i++) {
2531 		if (sc->intrs[i].tq != NULL) {
2532 			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2533 		}
2534 	}
2535 	LOCK(&sc->dev_lock);
2536 
2537 	/* Delete RX queue in card with flush param */
2538 	oce_stop_rx(sc);
2539 
2540 	/* Invalidate any pending cq and eq entries*/
2541 	for_all_evnt_queues(sc, eq, i)
2542 		oce_drain_eq(eq);
2543 	for_all_rq_queues(sc, rq, i)
2544 		oce_drain_rq_cq(rq);
2545 	for_all_wq_queues(sc, wq, i)
2546 		oce_drain_wq_cq(wq);
2547 
2548 	/* We still need to receive MCC async events,
2549 	   so re-enable intrs and also arm the first EQ.
2550 	*/
2551 	oce_hw_intr_enable(sc);
2552 	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2553 
2554 	DELAY(10);
2555 }
2556 
2557 static void
2558 oce_if_activate(POCE_SOFTC sc)
2559 {
2560 	struct oce_eq *eq;
2561 	struct oce_rq *rq;
2562 	struct oce_wq *wq;
2563 	int i, rc = 0;
2564 
2565 	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING , 0);
2566 
2567 	oce_hw_intr_disable(sc);
2568 
2569 	oce_start_rx(sc);
2570 
2571 	for_all_rq_queues(sc, rq, i) {
2572 		rc = oce_start_rq(rq);
2573 		if (rc)
2574 			device_printf(sc->dev, "Unable to start RX\n");
2575 	}
2576 
2577 	for_all_wq_queues(sc, wq, i) {
2578 		rc = oce_start_wq(wq);
2579 		if (rc)
2580 			device_printf(sc->dev, "Unable to start TX\n");
2581 	}
2582 
2583 	for_all_evnt_queues(sc, eq, i)
2584 		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2585 
2586 	oce_hw_intr_enable(sc);
2587 
2588 }
2589 
2590 static void
2591 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2592 {
2593 	/* Update Link status */
2594 	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2595 	     ASYNC_EVENT_LINK_UP) {
2596 		sc->link_status = ASYNC_EVENT_LINK_UP;
2597 		if_link_state_change(sc->ifp, LINK_STATE_UP);
2598 	} else {
2599 		sc->link_status = ASYNC_EVENT_LINK_DOWN;
2600 		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2601 	}
2602 }
2603 
2604 static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
2605 					 struct oce_async_evt_grp5_os2bmc *evt)
2606 {
2607 	DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2608 	if (evt->u.s.mgmt_enable)
2609 		sc->flags |= OCE_FLAGS_OS2BMC;
2610 	else
2611 		return;
2612 
2613 	sc->bmc_filt_mask = evt->u.s.arp_filter;
2614 	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2615 	sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2616 	sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2617 	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2618 	sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2619 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2620 	sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2621 	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2622 }
2623 
2624 static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
2625 {
2626 	struct oce_async_event_grp5_pvid_state *gcqe;
2627 	struct oce_async_evt_grp5_os2bmc *bmccqe;
2628 
2629 	switch (cqe->u0.s.async_type) {
2630 	case ASYNC_EVENT_PVID_STATE:
2631 		/* GRP5 PVID */
2632 		gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
2633 		if (gcqe->enabled)
2634 			sc->pvid = gcqe->tag & VLAN_VID_MASK;
2635 		else
2636 			sc->pvid = 0;
2637 		break;
2638 	case ASYNC_EVENT_OS2BMC:
2639 		bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
2640 		oce_async_grp5_osbmc_process(sc, bmccqe);
2641 		break;
2642 	default:
2643 		break;
2644 	}
2645 }
2646 
2647 /* Handle the Completion Queue for the Mailbox/Async notifications */
2648 uint16_t
2649 oce_mq_handler(void *arg)
2650 {
2651 	struct oce_mq *mq = (struct oce_mq *)arg;
2652 	POCE_SOFTC sc = mq->parent;
2653 	struct oce_cq *cq = mq->cq;
2654 	int num_cqes = 0, evt_type = 0, optype = 0;
2655 	struct oce_mq_cqe *cqe;
2656 	struct oce_async_cqe_link_state *acqe;
2657 	struct oce_async_event_qnq *dbgcqe;
2658 
2659 	bus_dmamap_sync(cq->ring->dma.tag,
2660 			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2661 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2662 
2663 	while (cqe->u0.dw[3]) {
2664 		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2665 		if (cqe->u0.s.async_event) {
2666 			evt_type = cqe->u0.s.event_type;
2667 			optype = cqe->u0.s.async_type;
2668 			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
2669 				/* Link status evt */
2670 				acqe = (struct oce_async_cqe_link_state *)cqe;
2671 				process_link_state(sc, acqe);
2672 			} else if (evt_type == ASYNC_EVENT_GRP5) {
2673 				oce_process_grp5_events(sc, cqe);
2674 			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
2675 					optype == ASYNC_EVENT_DEBUG_QNQ) {
2676 				dbgcqe =  (struct oce_async_event_qnq *)cqe;
2677 				if(dbgcqe->valid)
2678 					sc->qnqid = dbgcqe->vlan_tag;
2679 				sc->qnq_debug_event = TRUE;
2680 			}
2681 		}
2682 		cqe->u0.dw[3] = 0;
2683 		RING_GET(cq->ring, 1);
2684 		bus_dmamap_sync(cq->ring->dma.tag,
2685 				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2686 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2687 		num_cqes++;
2688 	}
2689 
2690 	if (num_cqes)
2691 		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2692 
2693 	return 0;
2694 }
2695 
2696 static void
2697 setup_max_queues_want(POCE_SOFTC sc)
2698 {
2699 	/* Check if it is a Flex10 machine; if so, don't use RSS. */
2700 	if ((sc->function_mode & FNM_FLEX10_MODE) ||
2701 	    (sc->function_mode & FNM_UMC_MODE)    ||
2702 	    (sc->function_mode & FNM_VNIC_MODE)	  ||
2703 	    (!is_rss_enabled(sc))		  ||
2704 	    IS_BE2(sc)) {
2705 		sc->nrqs = 1;
2706 		sc->nwqs = 1;
2707 	} else {
2708 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2709 		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2710 	}
2711 
2712 	if (IS_BE2(sc) && is_rss_enabled(sc))
2713 		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2714 }
2715 
2716 static void
2717 update_queues_got(POCE_SOFTC sc)
2718 {
2719 	if (is_rss_enabled(sc)) {
2720 		sc->nrqs = sc->intr_count + 1;
2721 		sc->nwqs = sc->intr_count;
2722 	} else {
2723 		sc->nrqs = 1;
2724 		sc->nwqs = 1;
2725 	}
2726 
2727 	if (IS_BE2(sc))
2728 		sc->nwqs = 1;
2729 }
2730 
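/*
 * Helper for the BE3 A1 TX stall workaround: returns TRUE when an IPv6
 * packet's next header is neither TCP nor UDP and the first extension
 * header reports a length byte of 0xff.
 */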
2731 static int
2732 oce_check_ipv6_ext_hdr(struct mbuf *m)
2733 {
2734 	struct ether_header *eh = mtod(m, struct ether_header *);
2735 	caddr_t m_datatemp = m->m_data;
2736 
2737 	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2738 		m->m_data += sizeof(struct ether_header);
2739 		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2740 
2741 		if((ip6->ip6_nxt != IPPROTO_TCP) && \
2742 				(ip6->ip6_nxt != IPPROTO_UDP)){
2743 			struct ip6_ext *ip6e = NULL;
2744 			m->m_data += sizeof(struct ip6_hdr);
2745 
2746 			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2747 			if(ip6e->ip6e_len == 0xff) {
2748 				m->m_data = m_datatemp;
2749 				return TRUE;
2750 			}
2751 		}
2752 		m->m_data = m_datatemp;
2753 	}
2754 	return FALSE;
2755 }
2756 
2757 static int
2758 is_be3_a1(POCE_SOFTC sc)
2759 {
2760 	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2761 		return TRUE;
2762 	}
2763 	return FALSE;
2764 }
2765 
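/*
 * Software VLAN encapsulation: any M_VLANTAG annotation is stripped and
 * the tag is written into the frame with ether_vlanencap(). Under PVID
 * the port VLAN is substituted when no tag is present, and under QnQ an
 * outer tag is added as well; *complete is cleared in those cases,
 * apparently to tell the caller that hardware tagging must not also be
 * requested (inferred from this function, not verified at call sites).
 */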
2766 static struct mbuf *
2767 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2768 {
2769 	uint16_t vlan_tag = 0;
2770 
2771 	if(!M_WRITABLE(m))
2772 		return NULL;
2773 
2774 	/* Embed vlan tag in the packet if it is not part of it */
2775 	if(m->m_flags & M_VLANTAG) {
2776 		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2777 		m->m_flags &= ~M_VLANTAG;
2778 	}
2779 
2780 	/* if UMC, ignore vlan tag insertion and instead insert pvid */
2781 	if(sc->pvid) {
2782 		if(!vlan_tag)
2783 			vlan_tag = sc->pvid;
2784 		if (complete)
2785 			*complete = FALSE;
2786 	}
2787 
2788 	if(vlan_tag) {
2789 		m = ether_vlanencap(m, vlan_tag);
2790 	}
2791 
2792 	if(sc->qnqid) {
2793 		m = ether_vlanencap(m, sc->qnqid);
2794 
2795 		if (complete)
2796 			*complete = FALSE;
2797 	}
2798 	return m;
2799 }
2800 
2801 static int
2802 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2803 {
2804 	if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2805 			oce_check_ipv6_ext_hdr(m)) {
2806 		return TRUE;
2807 	}
2808 	return FALSE;
2809 }
2810 
2811 static void
2812 oce_get_config(POCE_SOFTC sc)
2813 {
2814 	int rc = 0;
2815 	uint32_t max_rss = 0;
2816 
2817 	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2818 		max_rss = OCE_LEGACY_MODE_RSS;
2819 	else
2820 		max_rss = OCE_MAX_RSS;
2821 
2822 	if (!IS_BE(sc)) {
2823 		rc = oce_get_profile_config(sc, max_rss);
2824 		if (rc) {
2825 			sc->nwqs = OCE_MAX_WQ;
2826 			sc->nrssqs = max_rss;
2827 			sc->nrqs = sc->nrssqs + 1;
2828 		}
2829 	}
2830 	else { /* For BE3 don't rely on fw for determining the resources */
2831 		sc->nrssqs = max_rss;
2832 		sc->nrqs = sc->nrssqs + 1;
2833 		sc->nwqs = OCE_MAX_WQ;
2834 		sc->max_vlans = MAX_VLANFILTER_SIZE;
2835 	}
2836 }
2837 
2838 static void
2839 oce_rdma_close(void)
2840 {
2841   if (oce_rdma_if != NULL) {
2842     oce_rdma_if = NULL;
2843   }
2844 }
2845 
2846 static void
2847 oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
2848 {
2849   memcpy(macaddr, sc->macaddr.mac_addr, 6);
2850 }
2851 
2852 int
2853 oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
2854 {
2855   POCE_SOFTC sc;
2856   struct oce_dev_info di;
2857   int i;
2858 
2859   if ((rdma_info == NULL) || (rdma_if == NULL)) {
2860     return -EINVAL;
2861   }
2862 
2863   if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2864       (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2865     return -ENXIO;
2866   }
2867 
2868   rdma_info->close = oce_rdma_close;
2869   rdma_info->mbox_post = oce_mbox_post;
2870   rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2871   rdma_info->get_mac_addr = oce_get_mac_addr;
2872 
2873   oce_rdma_if = rdma_if;
2874 
2875   sc = softc_head;
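  /*
   * Announce every device already attached to the newly registered RoCE
   * driver: package the doorbell mapping, interrupt mode and MSI-X
   * vector list into an oce_dev_info and pass it to announce().
   */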
2876   while (sc != NULL) {
2877     if (oce_rdma_if->announce != NULL) {
2878       memset(&di, 0, sizeof(di));
2879       di.dev = sc->dev;
2880       di.softc = sc;
2881       di.ifp = sc->ifp;
2882       di.db_bhandle = sc->db_bhandle;
2883       di.db_btag = sc->db_btag;
2884       di.db_page_size = 4096;
2885       if (sc->flags & OCE_FLAGS_USING_MSIX) {
2886         di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
2887       } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2888         di.intr_mode = OCE_INTERRUPT_MODE_MSI;
2889       } else {
2890         di.intr_mode = OCE_INTERRUPT_MODE_INTX;
2891       }
2892       di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
2893       if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
2894         di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2895         di.msix.start_vector = sc->intr_count;
2896         for (i=0; i<di.msix.num_vectors; i++) {
2897           di.msix.vector_list[i] = sc->intrs[i].vector;
2898         }
2899       } else {
2900       }
2901       memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2902       di.vendor_id = pci_get_vendor(sc->dev);
2903       di.dev_id = pci_get_device(sc->dev);
2904 
2905       if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2906           di.flags  |= OCE_RDMA_INFO_RDMA_SUPPORTED;
2907       }
2908 
2909       rdma_if->announce(&di);
2910       sc = sc->next;
2911     }
2912   }
2913 
2914   return 0;
2915 }
2916 
2917 static void
2918 oce_read_env_variables(POCE_SOFTC sc)
2919 {
2920 	char *value = NULL;
2921 	int rc = 0;
2922 
2923         /* read if user wants to enable hwlro or swlro */
2924         //value = getenv("oce_enable_hwlro");
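        /*
         * Note: the getenv() lookups here and below are commented out in
         * this revision, so "value" stays NULL and both tunables are
         * effectively disabled: hwlro stays off and oce_rq_buf_size
         * keeps its default.
         */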
2925         if(value && IS_SH(sc)) {
2926                 sc->enable_hwlro = strtol(value, NULL, 10);
2927                 if(sc->enable_hwlro) {
2928                         rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
2929                         if(rc) {
2930                                 device_printf(sc->dev, "no hardware lro support\n");
2931                 		device_printf(sc->dev, "software lro enabled\n");
2932                                 sc->enable_hwlro = 0;
2933                         }else {
2934                                 device_printf(sc->dev, "hardware lro enabled\n");
2935 				oce_max_rsp_handled = 32;
2936                         }
2937                 }else {
2938                         device_printf(sc->dev, "software lro enabled\n");
2939                 }
2940         }else {
2941                 sc->enable_hwlro = 0;
2942         }
2943 
2944         /* read mbuf size */
2945         //value = getenv("oce_rq_buf_size");
2946         if(value && IS_SH(sc)) {
2947                 oce_rq_buf_size = strtol(value, NULL, 10);
2948                 switch(oce_rq_buf_size) {
2949                 case 2048:
2950                 case 4096:
2951                 case 9216:
2952                 case 16384:
2953                         break;
2954 
2955                 default:
2956                         device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n");
2957                         oce_rq_buf_size = 2048;
2958                 }
2959         }
2960 
2961 	return;
2962 }
2963