xref: /freebsd/sys/dev/vnic/thunder_bgx.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

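/*
 * The BGX index within a node is encoded in bit 24 of the CSR base
 * address; see the bgx_id derivation in thunder_bgx_attach().
 */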
#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

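	/*
	 * Derive a system-unique BGX index: bit 24 of the CSR base address
	 * selects the BGX within a node, and the CN88XX node number offsets
	 * it so both nodes get distinct IDs.
	 */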
	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err != 0) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/* Register read/write APIs */
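/*
 * Each LMAC has its own copy of the per-LMAC registers; the LMAC ID is
 * folded into the register offset at bit 20, so the copies sit 1 MB apart
 * within the BGX CSR window.
 */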
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

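	/* Poll up to 10 times, 100 us apart (roughly 1 ms total). */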
	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return a bitmap of the BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Returns the current link status of LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

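	/*
	 * Program the GMI speed/slot-time bits and the PCS sampling point
	 * to match the speed the link came up at.
	 */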
	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable the LMAC */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

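	/*
	 * Periodic (2 s) link poll: query the PHY driver for media status
	 * and propagate any change into the MAC configuration.
	 */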
	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

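	/*
	 * CAM entries are 8 bytes each; every LMAC owns a contiguous group
	 * of MAX_DMAC_PER_LMAC entries within the shared CAM.
	 */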
	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Power up, reset autoneg, enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* Clear all interrupts */
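	/*
	 * The INT registers are write-1-to-clear: writing back the value
	 * just read acknowledges every pending bit.
	 */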
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
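		/*
		 * SPUX_INT bit 13 appears to flag training completion
		 * (bit 14 training failure). If training has not finished,
		 * acknowledge both bits, restart training via BR_PMD_CRTL
		 * bit 0 and let a later link check retry.
		 */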
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* Wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (-1);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (-1);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

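	/*
	 * lane_to_sds packs four 2-bit lane indices; 0xE4 (0b11100100) is
	 * the identity mapping of LMAC lanes to SerDes lanes.
	 */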
	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * If the low level firmware has programmed an LMAC count for this
	 * board, prefer it over the static defaults above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
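			/*
			 * RXAUI uses two SerDes lanes per LMAC: 0x04 maps
			 * LMAC0 to lanes 0-1 and 0x0e maps LMAC1 to
			 * lanes 2-3.
			 */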
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
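	/* Each LMAC owns MAX_BGX_CHANS_PER_LMAC consecutive channels. */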
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out the QLM mode.
	 * This is configured by low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	}
}

static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}
1139