xref: /freebsd/sys/dev/vnic/thunder_bgx.c (revision b3e7694832e81d7a904a10f525f8797b753bf0d3)
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define BGX_NODE_ID_MASK	0x1
#define BGX_NODE_ID_SHIFT	24

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

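	/*
	 * Bit 24 of the CSR physical base address distinguishes the BGX
	 * instances within a node; instances on further nodes follow at
	 * MAX_BGX_PER_CN88XX per node.
	 */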
	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/* Register read/write APIs */
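/*
 * Per-LMAC register blocks are laid out at a 1MB (1 << 20) stride within
 * the BGX BAR, hence the (lmac << 20) + offset address computation below.
 */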
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

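/*
 * Poll a per-LMAC register until the bits in 'mask' clear (zero == TRUE)
 * or any of them become set (zero == FALSE).  Gives up after 10 polls,
 * 100us apart, and returns ETIMEDOUT.
 */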
static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return bitmap of BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Returns the current link status of LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

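/*
 * Propagate the PHY-reported link state to an SGMII LMAC: the LMAC is
 * stopped (CMR_EN cleared), GMI port config and PCS misc control are
 * reprogrammed for the negotiated speed/duplex, and the LMAC is then
 * re-enabled.
 */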
static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

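/*
 * Periodic link-state handler for PHY-backed LMACs.  Queries the PHY
 * through the lmac_if interface and, on a state change, reprograms the
 * MAC; reschedules itself on the check_link callout every 2 seconds.
 */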
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

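/*
 * Install a DMAC filter entry for an LMAC.  Each LMAC owns
 * MAX_DMAC_PER_LMAC consecutive 8-byte entries in the RX DMAC CAM;
 * bit 48 of an entry enables it and bits 49+ encode the owning LMAC.
 */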
void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Take PCS out of power-down; restart and enable autonegotiation */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

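/*
 * Bring-up/verification sequence for XAUI/RXAUI/XFI/XLAUI/KR links:
 * optionally restart link training, wait for the SPU to leave reset,
 * wait for block lock (or lane alignment), clear the latching receive
 * fault, wait for the SMU RX/TX paths to settle, and finally force and
 * re-check the latching-low receive-link bit.
 */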
static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* wait for SPU to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

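/*
 * Link poller for LMAC types with no PHY attached; derives the link
 * state directly from the SPU receive-link status and reschedules
 * itself on the check_link callout every 2 seconds.
 */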
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

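/*
 * Bring up a single LMAC: run the SGMII or XAUI init sequence, program
 * FCS/pad insertion and the minimum packet size, enable CMR RX/TX, add
 * the broadcast address to the DMAC filter and start the appropriate
 * link polling callout (PHY-driven or register-based).
 */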
static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (-1);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return (-1);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);	/* Clear CMR_EN */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

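/*
 * Set the default LMAC count, LMAC type and lane-to-SerDes mapping for
 * the QLM mode detected earlier; firmware may override the LMAC count
 * through BGX_CMR_RX_LMACS.
 */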
static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * If the low level firmware has programmed an LMAC count based on
	 * the board type, use that value; otherwise keep the static
	 * defaults set above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

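/*
 * One-time per-BGX hardware init: enable FCS stripping, check BIST
 * status, program each LMAC's type and lane-to-SerDes mapping, set the
 * backpressure channel masks and clear all DMAC filters and NCSI
 * steering rules.
 */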
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out QLM mode.
	 * This is configured by low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
	}
}

static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}