/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

static devclass_t thunder_bgx_devclass;
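/*
 * Bus glue.  Note that the per-device softc declared above is a single
 * struct lmac, while the struct bgx describing the whole block is
 * allocated separately in thunder_bgx_attach().  The octeon_mdio
 * dependency below presumably provides the other end of the lmac_if
 * interface (LMAC_PHY_CONNECT(), LMAC_MEDIA_STATUS(), ...) used to
 * reach external PHYs.
 */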
DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, octeon_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmac;
	int err;
	int rid;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err != 0) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmac);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	return (0);
}

/* Register read/write APIs */
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	/* Per-LMAC CSR banks are spaced 1 MB (1 << 20) apart in the BAR */
	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}
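/*
 * Lookup helpers for the global bgx_vnic[] table.  Entries are indexed by
 * (node * MAX_BGX_PER_CN88XX) + bgx_idx, matching the bgx_id computed in
 * thunder_bgx_attach() from bit <24> of the BAR base address plus the node
 * number.  For example, with two BGX blocks per CN88XX node:
 *
 *	node 0, BGX0 -> bgx_vnic[0]	node 1, BGX0 -> bgx_vnic[2]
 *	node 0, BGX1 -> bgx_vnic[1]	node 1, BGX1 -> bgx_vnic[3]
 */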
/* Return a bitmap of the BGX instances present on this node */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Returns the current link status of LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED;		/* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME;	/* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Read back port config before re-enabling the LMAC */
	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}
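/*
 * Link-state maintenance.  Every enabled LMAC runs a self-rescheduling
 * callout at 2 * hz intervals: bgx_lmac_handler() for modes with an
 * attached PHY driver (link state queried via LMAC_MEDIA_STATUS()), and
 * bgx_poll_for_link() for the XFI/XAUI/XLAUI/KR modes, which read link
 * state directly from the SPU status registers.
 */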
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		/* No bgx softc to print through here - bgx is NULL */
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}
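/*
 * DMAC CAM layout assumed above: 8-byte entries with MAX_DMAC_PER_LMAC
 * slots per LMAC, so entry i of LMAC l lives at
 * BGX_CMR_RX_DMACX_CAM + ((l * MAX_DMAC_PER_LMAC + i) * 8).  Bit 48 of
 * an entry enables it and the bits above that bind it to the owning LMAC.
 */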
/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Power up, reset autoneg, enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* Clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}
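/*
 * Verify a 10G-and-above link end to end.  Several SPU status bits polled
 * below are latching: RCVFLT latches high and is cleared by writing it
 * back, while RCV_LNK latches low and must be set and re-read to observe
 * the live state.  Any failure returns ENXIO and the link is re-checked
 * on a later callout tick.
 */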
static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* Wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}
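/*
 * Polling fallback for LMAC modes without an attached PHY driver.  Speed
 * is inferred from the LMAC type alone (40G for XLAUI, otherwise 10G) and
 * duplex is always reported as full.
 */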
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (-1);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (-1);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL,
		    MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL,
		    MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}
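/*
 * Teardown mirrors bgx_lmac_enable(): stop and drain the link callout
 * before touching the hardware, then clear CMR_EN and flush the LMAC's
 * DMAC CAM entries.
 */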
static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);	/* Clear CMR_EN */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * Check if low level firmware has programmed the LMAC count
	 * based on board type.  If so, use that value; otherwise keep
	 * the default static values set above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}
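/*
 * Summary of the static QLM mode table above (lane_to_sds 0xE4 is the
 * identity serdes map: four 2-bit fields selecting lanes 3.2.1.0):
 *
 *	QLM mode	LMACs	LMAC type	Training
 *	SGMII		4	BGX_MODE_SGMII	no
 *	XAUI_1X4	1	BGX_MODE_XAUI	no
 *	RXAUI_2X2	2	BGX_MODE_RXAUI	no
 *	XFI_4X1		4	BGX_MODE_XFI	no
 *	XLAUI_1X4	1	BGX_MODE_XLAUI	no
 *	10G_KR_4X1	4	BGX_MODE_10G_KR	yes
 *	40G_KR4_1X4	1	BGX_MODE_40G_KR	yes
 */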
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out QLM mode.
	 * This is configured by low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
	}
}
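/*
 * PHY discovery.  Only the FDT path is implemented; on ACPI-only systems
 * bgx_init_phy() still returns ENXIO (see the ARM64TODO below), which
 * makes thunder_bgx_attach() fail for this BGX.
 */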
device_printf(dev, "BGX%d QLM default mode: SGMII\n", 1114 bgx->bgx_id); 1115 } 1116 } 1117 } 1118 1119 static int 1120 bgx_init_phy(struct bgx *bgx) 1121 { 1122 int err; 1123 1124 /* By default we fail */ 1125 err = ENXIO; 1126 #ifdef FDT 1127 err = bgx_fdt_init_phy(bgx); 1128 #endif 1129 #ifdef ACPI 1130 if (err != 0) { 1131 /* ARM64TODO: Add ACPI function here */ 1132 } 1133 #endif 1134 return (err); 1135 } 1136