xref: /freebsd/sys/dev/vnic/thunder_bgx.c (revision 1b99d52f261bfacfb9bb149d33ed6444364ac219)
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

static devclass_t thunder_bgx_devclass;

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

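	/*
	 * Derive the global BGX index from the CSR base address: bit 24
	 * of the physical address distinguishes the BGX instances on a
	 * node, and the node ID offsets the index by MAX_BGX_PER_CN88XX.
	 */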
	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err != 0) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/*
 * Register read/write APIs. Each LMAC has its own copy of the per-LMAC
 * CSRs, selected by bits 23:20 of the register offset (lmac << 20).
 * Note that bgx_reg_modify() can only set bits: it ORs "val" into the
 * current register contents.
 */
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

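/*
 * Busy-wait for the bits in "mask" to clear (zero == TRUE) or to be set
 * (zero == FALSE), giving up after 10 polls spaced 100us apart (~1ms in
 * total). A typical use is waiting for a self-clearing reset bit, e.g.:
 *
 *	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
 *	    PCS_MRX_CTL_RESET, TRUE) != 0)
 *		... reset did not complete in time ...
 */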
static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return a bitmap of the BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return the number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Return the current link status of the LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

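/*
 * Propagate the PHY-reported link state to the SGMII/GMI hardware: the
 * LMAC is quiesced (CMR_EN cleared), the GMI port speed/duplex bits,
 * PCS sample point and slot time are reprogrammed for 10/100/1000 Mbps
 * operation, and the LMAC is then re-enabled.
 */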
static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable the LMAC */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

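/*
 * Periodic (2 second) link check for PHY-managed LMACs. The current
 * media status is fetched over the LMAC_IF interface; on any change in
 * link, speed or duplex the MAC is reprogrammed, either directly for
 * SGMII or via the XAUI link bring-up sequence.
 */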
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	/* Stat counters above index 8 are always read through LMAC0 */
	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

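/*
 * The DMAC filter CAM is shared by all LMACs and is addressed through
 * LMAC0: each LMAC owns a window of MAX_DMAC_PER_LMAC consecutive
 * 64-bit entries, so an entry lives at offset
 * (lmac * MAX_DMAC_PER_LMAC + index) * sizeof(uint64_t).
 */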
void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		/* bgx is NULL here, so device_printf() cannot be used */
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

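/*
 * One-time SGMII bring-up for an LMAC: program the TX threshold and
 * maximum frame size, reset the PCS and wait for the reset bit to
 * self-clear, then enable autonegotiation and wait for it to complete.
 */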
static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Power up, reset autoneg, enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

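/*
 * One-time bring-up for the XAUI/RXAUI/XFI/XLAUI/KR modes: reset the
 * SPU, disable the LMAC while it is reconfigured, acknowledge stale
 * interrupts, optionally enable KR link training, disable FEC and
 * autonegotiation, and finally re-enable the LMAC and SMU TX.
 */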
static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* Clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* Wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

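/*
 * Periodic (2 second) link poll for LMAC types that have no PHY driver
 * attached (XFI/XLAUI/KR). SPU_STATUS1[RCV_LNK] latches low, so it is
 * written high first and re-read to sample the live link state; speed
 * and duplex are implied by the LMAC type.
 */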
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

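/*
 * Bring one LMAC into service: run the mode-specific (SGMII or XAUI
 * family) init sequence, enable FCS/PAD insertion and a minimum packet
 * size on TX, enable the CMR data paths, install the broadcast DMAC
 * filter and start the appropriate 2 second link poll callout.
 */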
static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (ENXIO);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (ENXIO);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low-level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	/* Disable the LMAC */
	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

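/*
 * Derive the LMAC count, LMAC type and lane-to-SerDes map from the QLM
 * mode. lane_to_sds appears to pack four 2-bit lane entries into one
 * byte: 0xE4 (0b11100100) would be the identity map for the 4-lane
 * modes, while the single-lane modes start at 0 and get the per-LMAC
 * "+ i" adjustment in bgx_init_hw().
 */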
static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * Check if the low-level firmware has programmed the LMAC count
	 * for this board type; if so, use it instead of the static
	 * defaults above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

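/*
 * Recover the QLM mode chosen by the low-level firmware from the LMAC0
 * type field in BGX_CMRX_CFG. XFI vs. 10G_KR and XLAUI vs. 40G_KR4
 * share a type encoding and are told apart by whether link training is
 * enabled in BGX_SPUX_BR_PMD_CRTL.
 */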
static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out the QLM mode.
	 * This is configured by the low-level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	}
}

static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}