/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err != 0) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/* Register read/write APIs */
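/*
 * Per-LMAC CSRs are laid out at a 1 MB (1 << 20) stride within the BGX
 * register BAR, so the target address is simply (lmac << 20) + offset.
 */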
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

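/*
 * Poll a register until the bits in 'mask' clear (zero == TRUE) or become
 * set (zero == FALSE).  The register is sampled every 100us and the poll
 * gives up after 10 tries (roughly 1ms), returning ETIMEDOUT.
 */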
static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout != 0) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return a bitmap of the BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i] != NULL)
			map |= (1 << i);
	}

	return (map);
}

/* Return the number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Return the current link status of the LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

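/*
 * Propagate a link state change into the SGMII/GMI hardware: quiesce the
 * LMAC (clear CMR_EN), reprogram the GMI port configuration, PCS sampling
 * point and TX slot/burst timing for the negotiated speed and duplex, then
 * re-enable the LMAC.
 */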
static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable the LMAC */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

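/*
 * Periodic (2 second) callout for PHY-managed LMACs: query the PHY through
 * the lmac_if interface and propagate any link, speed or duplex change
 * into the MAC.
 */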
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	    lmac->last_link != link ||
	    lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (link_changed == 0)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

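/*
 * Clear every DMAC CAM entry registered for this LMAC.  Each LMAC owns a
 * group of MAX_DMAC_PER_LMAC consecutive 8-byte entries in the shared CAM.
 */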
static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

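/*
 * Bring up an LMAC in SGMII mode: program the GMI TX threshold and jabber
 * limit, reset the PCS and restart autonegotiation.  Returns ENXIO if the
 * PCS reset or the autonegotiation does not complete in time.
 */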
static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Clear power down, reset autoneg and enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

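/*
 * Bring up an LMAC in one of the XAUI/RXAUI/XFI/XLAUI/KR modes: reset the
 * SPU, optionally arm link training for the KR modes, disable FEC and
 * autonegotiation, then re-enable the MAC with the deficit idle count
 * enabled on TX.
 */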
static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* Clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

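/*
 * Verify and complete XAUI-family link bring-up: wait for the SPU to leave
 * reset, check for block lock (or lane alignment), clear the latching
 * receive-fault and receive-link status bits, and re-enable the SPU
 * receiver only once the SMU reports the link up and idle.
 */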
static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* Wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

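/*
 * Periodic (2 second) callout used when there is no PHY to query: derive
 * the link state directly from the latching SPU receive-link status bit.
 */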
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

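/*
 * One-time LMAC bring-up: run the SGMII or XAUI init path, enable FCS and
 * padding on TX, add the broadcast address to the DMAC filter and start
 * the link-polling callout appropriate for the mode (PHY-backed or direct
 * SPU polling).
 */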
static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (-1);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (-1);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

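/*
 * Tear an LMAC down: stop the link callout, disable the LMAC in the CMR,
 * flush its DMAC filters and disconnect from the PHY if one is attached.
 */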
static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

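/*
 * Translate the QLM mode detected at attach time into the default LMAC
 * count, LMAC type and lane-to-SerDes mapping.  Firmware may override the
 * count through BGX_CMR_RX_LMACS.
 */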
static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * Check if the low level firmware has programmed an LMAC count
	 * based on the board type; if so, use it instead of the static
	 * defaults above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

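/*
 * Global BGX initialization: set the port count, check BIST status,
 * program the per-LMAC type and lane map, set the backpressure masks and
 * clear all MAC filtering and steering rules.
 */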
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS) != 0)
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i != 0)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out the QLM mode.
	 * This is configured by low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	}
}

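/*
 * Resolve the PHY connection for the BGX: only the FDT path is
 * implemented; ACPI support is still a TODO.
 */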
static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}