1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/types.h> 29 #include <sys/cmn_err.h> 30 #include <sys/vmsystm.h> 31 #include <sys/vmem.h> 32 #include <sys/machsystm.h> /* lddphys() */ 33 #include <sys/iommutsb.h> 34 #include <sys/pci.h> 35 #include <sys/hotplug/pci/pciehpc.h> 36 #include <pcie_pwr.h> 37 #include <px_obj.h> 38 #include "px_regs.h" 39 #include "oberon_regs.h" 40 #include "px_csr.h" 41 #include "px_lib4u.h" 42 43 /* 44 * Registers that need to be saved and restored during suspend/resume. 45 */ 46 47 /* 48 * Registers in the PEC Module. 49 * LPU_RESET should be set to 0ull during resume 50 * 51 * This array is in reg,chip form. PX_CHIP_UNIDENTIFIED is for all chips 52 * or PX_CHIP_FIRE for Fire only, or PX_CHIP_OBERON for Oberon only. 
 */
static struct px_pec_regs {
	uint64_t reg;	/* PEC CSR offset to save/restore */
	uint64_t chip;	/* chip the register applies to (see comment above) */
} pec_config_state_regs[] = {
	{PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	/* DLU registers exist on Oberon only */
	{DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
	{DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
	{DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
	/* LPU registers exist on Fire only */
	{LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RESET, PX_CHIP_FIRE},
	{LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
	{LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
	{LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
	{LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
	{LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
	{DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

/* Number of entries in pec_config_state_regs[] */
#define	PEC_KEYS	\
	((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))

/* Bytes needed to save one uint64_t per PEC register */
#define	PEC_SIZE	(PEC_KEYS * sizeof (uint64_t))

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
/* Save area for the interrupt mapping registers, one per mapping entry */
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the JBC module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t jbc_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	JBC_SIZE (sizeof (jbc_config_state_regs))
#define	JBC_KEYS (JBC_SIZE / sizeof (uint64_t))

/*
 * Registers for the UBC module.
 * UBC_ERROR_STATUS_CLEAR needs to be cleared.
 * (-1ull)
 */
static uint64_t ubc_config_state_regs[] = {
	UBC_ERROR_LOG_ENABLE,
	UBC_INTERRUPT_ENABLE
};
#define	UBC_SIZE (sizeof (ubc_config_state_regs))
#define	UBC_KEYS (UBC_SIZE / sizeof (uint64_t))

/*
 * MSIQ registers (other-event mappings and MSI address registers)
 * that are saved/restored individually across suspend/resume.
 */
static uint64_t msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

/* Save areas for the event queue state and MSI mapping tables */
#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

/* Forward declarations; definitions appear later in this file. */
static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);

/*
 * Initialize the bus, but do not enable interrupts.
 *
 * Dispatches to the chip-appropriate bus-control init routine:
 * UBC for Oberon, JBC for Fire.  Unknown chip types are logged
 * via DBG and otherwise ignored.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		ubc_init(xbc_csr_base, pxu_p);
		break;
	case PX_CHIP_FIRE:
		jbc_init(xbc_csr_base, pxu_p);
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}
}

/*
 * Initialize the JBC module, but do not enable interrupts.
194 */ 195 /* ARGSUSED */ 196 static void 197 jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p) 198 { 199 uint64_t val; 200 201 /* Check if we need to enable inverted parity */ 202 val = (1ULL << JBUS_PARITY_CONTROL_P_EN); 203 CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val); 204 DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n", 205 CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL)); 206 207 val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) | 208 (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) | 209 (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) | 210 (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) | 211 (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) | 212 (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) | 213 (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) | 214 (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN); 215 CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val); 216 DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n", 217 CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE)); 218 219 /* 220 * Enable merge, jbc and dmc interrupts. 221 */ 222 CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull); 223 DBG(DBG_CB, NULL, 224 "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n", 225 CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE)); 226 227 /* 228 * CSR_V JBC's interrupt regs (log, enable, status, clear) 229 */ 230 DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n", 231 CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE)); 232 233 DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n", 234 CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE)); 235 236 DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n", 237 CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS)); 238 239 DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n", 240 CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR)); 241 } 242 243 /* 244 * Initialize the UBC module, but do not enable interrupts. 
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	/*
	 * Enable Uranus bus error log bits.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	/*
	 * Clear Uranus bus errors.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V UBC's interrupt regs (log, enable, status, clear)
	 * Reads below are debug-trace only; they program nothing.
	 */
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 * IB (interrupt block): trace the IMU interrupt registers only;
 * no CSRs are written here.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 * Debug-trace only; nothing is programmed here.
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 * TLU (transaction layer unit): program TLU_CONTROL, TLU_DEVICE_CONTROL
 * and TLU_LINK_CONTROL; every other register access below is a
 * debug-trace read only.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register? Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * NOTE: Do not set the NPWR_EN bit. The desired value of this bit
	 * will be set by OBP.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
	 * behind non-posted PIO writes. This blocking could cause a master or
	 * slave timeout on the host bus if multiple serialized PIOs were to
	 * suffer Completion Timeouts because the CTO delays for each PIO ahead
	 * of the read would accumulate. Since the Olympus processor can have
	 * only 1 PIO outstanding, there is no possibility of PIO accesses from
	 * a given CPU to a given device being re-ordered by the PCIe fabric;
	 * therefore turning off serialization should be safe from a PCIe
	 * ordering perspective.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		val &= ~(1ull << TLU_CONTROL_NPWR_EN);

	/*
	 * Set Detect.Quiet. This will disable automatic link
	 * re-training, if the link goes down e.g. power management
	 * turns off power to the downstream device. This will enable
	 * Fire to go to Drain state, after link down. The drain state
	 * forces a reset to the FC state machine, which is required for
	 * proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bit [7:0] are currently defined. Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b. Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register. Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0. These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */

	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes which is 000b. Bits [7:5] are Max Payload Size, which
	 * start at 128 bytes which is 000b. This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been test and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register. Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform. Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots. Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x. Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));


	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the LPU (link/physical unit, Fire only): program the
 * ACKNAK latency and replay timers based on link width and max
 * payload size, then trace the remaining LPU registers.
 */
/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 * Rows index max payload size, columns index link width.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED, 0x49, 0x43, 0x30},
		{0x1A0, 0x76, 0x6B, 0x48},
		{0x22F, 0x9A, 0x56, 0x56},
		{0x42F, 0x11A, 0x96, 0x96},
		{0x82F, 0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 2.0 sections 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379, 0x112, 0xFC, 0xB4},
		{0x618, 0x1BA, 0x192, 0x10E},
		{0x831, 0x242, 0x143, 0x143},
		{0xFB1, 0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};

	/*
	 * Get the Link Width. See table above LINK_WIDTH_ARR_SIZE #define
	 * Only Link Widths of x1, x4, and x8 are supported.
	 * If any width is reported other than x8, set default to x8.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 * Unrecognized widths fall back to the x1 column (index 0).
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}

	/*
	 * Get the Max Payload Size.
785 * See table above LINK_MAX_PKT_ARR_SIZE #define 786 */ 787 max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) & 788 TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT); 789 790 DBG(DBG_LPU, NULL, "lpu_init - May Payload: %d\n", 791 (0x80 << max_payload)); 792 793 /* Make sure the packet size is not greater than 4096 */ 794 max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ? 795 (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload; 796 797 /* 798 * CSR_V LPU_ID Expect HW 0x0 799 */ 800 801 /* 802 * This register has link id, phy id and gigablaze id. 803 * Should be set by HW. 804 */ 805 DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n", 806 CSR_XR(csr_base, LPU_ID)); 807 808 /* 809 * CSR_V LPU_RESET Expect Kernel 0x0 810 */ 811 812 /* 813 * No reason to have any reset bits high until an error is 814 * detected on the link. 815 */ 816 val = 0ull; 817 CSR_XS(csr_base, LPU_RESET, val); 818 DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n", 819 CSR_XR(csr_base, LPU_RESET)); 820 821 /* 822 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0 823 */ 824 825 /* 826 * Bits [15:8] are Debug B, and bit [7:0] are Debug A. 827 * They are read-only. What do the 8 bits mean, and 828 * how do they get set if they are read only? 829 */ 830 DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n", 831 CSR_XR(csr_base, LPU_DEBUG_STATUS)); 832 833 /* 834 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0 835 */ 836 DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n", 837 CSR_XR(csr_base, LPU_DEBUG_CONFIG)); 838 839 /* 840 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0 841 */ 842 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n", 843 CSR_XR(csr_base, LPU_LTSSM_CONTROL)); 844 845 /* 846 * CSR_V LPU_LINK_STATUS Expect HW 0x101 847 */ 848 849 /* 850 * This register has bits [9:4] for link width, and the 851 * default 0x10, means a width of x16. The problem is 852 * this width is not supported according to the TLU 853 * link status register. 
854 */ 855 DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n", 856 CSR_XR(csr_base, LPU_LINK_STATUS)); 857 858 /* 859 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0 860 */ 861 DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n", 862 CSR_XR(csr_base, LPU_INTERRUPT_STATUS)); 863 864 /* 865 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0 866 */ 867 DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n", 868 CSR_XR(csr_base, LPU_INTERRUPT_MASK)); 869 870 /* 871 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0 872 */ 873 DBG(DBG_LPU, NULL, 874 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n", 875 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT)); 876 877 /* 878 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0 879 */ 880 DBG(DBG_LPU, NULL, 881 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n", 882 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL)); 883 884 /* 885 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0 886 */ 887 DBG(DBG_LPU, NULL, 888 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n", 889 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1)); 890 891 /* 892 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0 893 */ 894 DBG(DBG_LPU, NULL, 895 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n", 896 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST)); 897 898 /* 899 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0 900 */ 901 DBG(DBG_LPU, NULL, 902 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n", 903 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2)); 904 905 /* 906 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0 907 */ 908 DBG(DBG_LPU, NULL, 909 "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n", 910 CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST)); 911 912 /* 913 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100 914 */ 915 916 /* 917 * This is another place where Max Payload can be set, 918 * this time for the link layer. 
It will be set to 919 * 128B, which is the default, but this will need to 920 * be revisited. 921 */ 922 val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN); 923 CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val); 924 DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n", 925 CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG)); 926 927 /* 928 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5 929 */ 930 931 /* 932 * Another R/W status register. Bit 3, DL up Status, will 933 * be set high. The link state machine status bits [2:0] 934 * are set to 0x1, but the status bits are not defined in the 935 * PRM. What does 0x1 mean, what others values are possible 936 * and what are thier meanings? 937 * 938 * This register has been giving us problems in simulation. 939 * It has been mentioned that software should not program 940 * any registers with WE bits except during debug. So 941 * this register will no longer be programmed. 942 */ 943 944 DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n", 945 CSR_XR(csr_base, LPU_LINK_LAYER_STATUS)); 946 947 /* 948 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0 949 */ 950 DBG(DBG_LPU, NULL, 951 "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n", 952 CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST)); 953 954 /* 955 * CSR_V LPU Link Layer interrupt regs (mask, status) 956 */ 957 DBG(DBG_LPU, NULL, 958 "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n", 959 CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK)); 960 961 DBG(DBG_LPU, NULL, 962 "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n", 963 CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS)); 964 965 /* 966 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7 967 */ 968 969 /* 970 * The PRM says that only the first two bits will be set 971 * high by default, which will enable flow control for 972 * posted and non-posted updates, but NOT completetion 973 * updates. 
974 */ 975 val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) | 976 (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN); 977 CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val); 978 DBG(DBG_LPU, NULL, 979 "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n", 980 CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL)); 981 982 /* 983 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE 984 * Expect OBP 0x1D4C 985 */ 986 987 /* 988 * This should be set by OBP. We'll check to make sure. 989 */ 990 DBG(DBG_LPU, NULL, "lpu_init - " 991 "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n", 992 CSR_XR(csr_base, 993 LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE)); 994 995 /* 996 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ??? 997 */ 998 999 /* 1000 * This register has Flow Control Update Timer values for 1001 * non-posted and posted requests, bits [30:16] and bits 1002 * [14:0], respectively. These are read-only to SW so 1003 * either HW or OBP needs to set them. 1004 */ 1005 DBG(DBG_LPU, NULL, "lpu_init - " 1006 "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n", 1007 CSR_XR(csr_base, 1008 LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0)); 1009 1010 /* 1011 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ??? 1012 */ 1013 1014 /* 1015 * Same as timer0 register above, except for bits [14:0] 1016 * have the timer values for completetions. Read-only to 1017 * SW; OBP or HW need to set it. 
1018 */ 1019 DBG(DBG_LPU, NULL, "lpu_init - " 1020 "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n", 1021 CSR_XR(csr_base, 1022 LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1)); 1023 1024 /* 1025 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD 1026 */ 1027 val = acknak_timer_table[max_payload][link_width]; 1028 CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val); 1029 1030 DBG(DBG_LPU, NULL, "lpu_init - " 1031 "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n", 1032 CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD)); 1033 1034 /* 1035 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0 1036 */ 1037 DBG(DBG_LPU, NULL, 1038 "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n", 1039 CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER)); 1040 1041 /* 1042 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD 1043 */ 1044 val = replay_timer_table[max_payload][link_width]; 1045 CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val); 1046 1047 DBG(DBG_LPU, NULL, 1048 "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n", 1049 CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD)); 1050 1051 /* 1052 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0 1053 */ 1054 DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n", 1055 CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER)); 1056 1057 /* 1058 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3 1059 */ 1060 DBG(DBG_LPU, NULL, 1061 "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n", 1062 CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS)); 1063 1064 /* 1065 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F 1066 */ 1067 DBG(DBG_LPU, NULL, 1068 "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n", 1069 CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS)); 1070 1071 /* 1072 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000 1073 */ 1074 val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT << 1075 LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) | 1076 
(LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT << 1077 LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR)); 1078 1079 CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val); 1080 DBG(DBG_LPU, NULL, 1081 "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n", 1082 CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER)); 1083 1084 /* 1085 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0 1086 */ 1087 DBG(DBG_LPU, NULL, 1088 "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n", 1089 CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER)); 1090 1091 /* 1092 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580 1093 */ 1094 DBG(DBG_LPU, NULL, 1095 "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n", 1096 CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT)); 1097 1098 /* 1099 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000 1100 */ 1101 DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n", 1102 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER)); 1103 1104 /* 1105 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF 1106 */ 1107 DBG(DBG_LPU, NULL, 1108 "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n", 1109 CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER)); 1110 1111 /* 1112 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157 1113 */ 1114 1115 /* 1116 * Test only register. Will not be programmed. 1117 */ 1118 DBG(DBG_LPU, NULL, 1119 "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n", 1120 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR)); 1121 1122 /* 1123 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000 1124 */ 1125 1126 /* 1127 * Test only register. Will not be programmed. 
1128 */ 1129 DBG(DBG_LPU, NULL, 1130 "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n", 1131 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS)); 1132 1133 /* 1134 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0 1135 */ 1136 DBG(DBG_LPU, NULL, 1137 "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n", 1138 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS)); 1139 1140 /* 1141 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0 1142 */ 1143 DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n", 1144 CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL)); 1145 1146 /* 1147 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0 1148 */ 1149 1150 /* 1151 * Test only register. Will not be programmed. 1152 */ 1153 DBG(DBG_LPU, NULL, 1154 "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n", 1155 CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL)); 1156 1157 /* 1158 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0 1159 */ 1160 DBG(DBG_LPU, NULL, 1161 "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n", 1162 CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0)); 1163 1164 /* 1165 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0 1166 */ 1167 DBG(DBG_LPU, NULL, 1168 "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n", 1169 CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1)); 1170 1171 /* 1172 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0 1173 */ 1174 DBG(DBG_LPU, NULL, 1175 "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n", 1176 CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2)); 1177 1178 /* 1179 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0 1180 */ 1181 DBG(DBG_LPU, NULL, 1182 "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n", 1183 CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3)); 1184 1185 /* 1186 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0 1187 */ 1188 DBG(DBG_LPU, NULL, 1189 "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n", 1190 CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4)); 1191 1192 /* 1193 * CSR_V 
LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0 1194 */ 1195 1196 /* 1197 * Test only register. Will not be programmed. 1198 */ 1199 DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n", 1200 CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT)); 1201 1202 /* 1203 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0 1204 */ 1205 1206 /* 1207 * Test only register. Will not be programmed. 1208 */ 1209 DBG(DBG_LPU, NULL, 1210 "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n", 1211 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT)); 1212 1213 /* 1214 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0 1215 */ 1216 1217 /* 1218 * Test only register. 1219 */ 1220 DBG(DBG_LPU, NULL, 1221 "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n", 1222 CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA)); 1223 1224 /* 1225 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0 1226 */ 1227 DBG(DBG_LPU, NULL, "lpu_init - " 1228 "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n", 1229 CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER)); 1230 1231 /* 1232 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0 1233 */ 1234 1235 /* 1236 * test only register. 1237 */ 1238 DBG(DBG_LPU, NULL, 1239 "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n", 1240 CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED)); 1241 1242 /* 1243 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0 1244 */ 1245 1246 /* 1247 * test only register. 
1248 */ 1249 DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n", 1250 CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL)); 1251 1252 /* 1253 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10 1254 */ 1255 DBG(DBG_LPU, NULL, 1256 "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n", 1257 CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION)); 1258 1259 /* 1260 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0 1261 */ 1262 DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n", 1263 CSR_XR(csr_base, LPU_PHY_LAYER_STATUS)); 1264 1265 /* 1266 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0 1267 */ 1268 DBG(DBG_LPU, NULL, 1269 "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n", 1270 CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST)); 1271 1272 /* 1273 * CSR_V LPU PHY LAYER interrupt regs (mask, status) 1274 */ 1275 DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n", 1276 CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK)); 1277 1278 DBG(DBG_LPU, NULL, 1279 "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n", 1280 CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS)); 1281 1282 /* 1283 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0 1284 */ 1285 1286 /* 1287 * This also needs some explanation. What is the best value 1288 * for the water mark? Test mode enables which test mode? 1289 * Programming model needed for the Receiver Reset Lane N 1290 * bits. 
1291 */ 1292 DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n", 1293 CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG)); 1294 1295 /* 1296 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0 1297 */ 1298 DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n", 1299 CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1)); 1300 1301 /* 1302 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0 1303 */ 1304 DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n", 1305 CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2)); 1306 1307 /* 1308 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0 1309 */ 1310 DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n", 1311 CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3)); 1312 1313 /* 1314 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0 1315 */ 1316 DBG(DBG_LPU, NULL, 1317 "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n", 1318 CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST)); 1319 1320 /* 1321 * CSR_V LPU RX LAYER interrupt regs (mask, status) 1322 */ 1323 DBG(DBG_LPU, NULL, 1324 "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n", 1325 CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK)); 1326 1327 DBG(DBG_LPU, NULL, 1328 "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n", 1329 CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS)); 1330 1331 /* 1332 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0 1333 */ 1334 DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n", 1335 CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG)); 1336 1337 /* 1338 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0 1339 */ 1340 DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n", 1341 CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS)); 1342 1343 /* 1344 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0 1345 */ 1346 DBG(DBG_LPU, NULL, 1347 "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n", 1348 CSR_XR(csr_base, 1349 LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST)); 1350 1351 /* 1352 * 
CSR_V LPU TX LAYER interrupt regs (mask, status) 1353 */ 1354 DBG(DBG_LPU, NULL, 1355 "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n", 1356 CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK)); 1357 1358 DBG(DBG_LPU, NULL, 1359 "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n", 1360 CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS)); 1361 1362 /* 1363 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0 1364 */ 1365 DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n", 1366 CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2)); 1367 1368 /* 1369 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205 1370 */ 1371 1372 /* 1373 * The new PRM has values for LTSSM 8 ns timeout value and 1374 * LTSSM 20 ns timeout value. But what do these values mean? 1375 * Most of the other bits are questions as well. 1376 * 1377 * As such we will use the reset value. 1378 */ 1379 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n", 1380 CSR_XR(csr_base, LPU_LTSSM_CONFIG1)); 1381 1382 /* 1383 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0 1384 */ 1385 1386 /* 1387 * Again, what does '12 ms timeout value mean'? 
1388 */ 1389 val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT << 1390 LPU_LTSSM_CONFIG2_LTSSM_12_TO); 1391 CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val); 1392 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n", 1393 CSR_XR(csr_base, LPU_LTSSM_CONFIG2)); 1394 1395 /* 1396 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120 1397 */ 1398 val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT << 1399 LPU_LTSSM_CONFIG3_LTSSM_2_TO); 1400 CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val); 1401 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n", 1402 CSR_XR(csr_base, LPU_LTSSM_CONFIG3)); 1403 1404 /* 1405 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300 1406 */ 1407 val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT << 1408 LPU_LTSSM_CONFIG4_DATA_RATE) | 1409 (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT << 1410 LPU_LTSSM_CONFIG4_N_FTS)); 1411 CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val); 1412 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n", 1413 CSR_XR(csr_base, LPU_LTSSM_CONFIG4)); 1414 1415 /* 1416 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0 1417 */ 1418 val = 0ull; 1419 CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val); 1420 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n", 1421 CSR_XR(csr_base, LPU_LTSSM_CONFIG5)); 1422 1423 /* 1424 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0 1425 */ 1426 1427 /* 1428 * LTSSM Status registers are test only. 
1429 */ 1430 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n", 1431 CSR_XR(csr_base, LPU_LTSSM_STATUS1)); 1432 1433 /* 1434 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0 1435 */ 1436 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n", 1437 CSR_XR(csr_base, LPU_LTSSM_STATUS2)); 1438 1439 /* 1440 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0 1441 */ 1442 DBG(DBG_LPU, NULL, 1443 "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n", 1444 CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST)); 1445 1446 /* 1447 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status) 1448 */ 1449 DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n", 1450 CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK)); 1451 1452 DBG(DBG_LPU, NULL, 1453 "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n", 1454 CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS)); 1455 1456 /* 1457 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0 1458 */ 1459 DBG(DBG_LPU, NULL, 1460 "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n", 1461 CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE)); 1462 1463 /* 1464 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407 1465 */ 1466 DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n", 1467 CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1)); 1468 1469 /* 1470 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35 1471 */ 1472 DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n", 1473 CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2)); 1474 1475 /* 1476 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA 1477 */ 1478 DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n", 1479 CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3)); 1480 1481 /* 1482 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848 1483 */ 1484 DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n", 1485 CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4)); 1486 1487 /* 1488 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0 1489 */ 1490 
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/*
 * dlu_init - program the Oberon Data Link Unit (DLU), the Oberon
 * counterpart of Fire's LPU link layer: unmask all DLU interrupts,
 * enable virtual channel 0, enable flow-control updates for posted
 * and non-posted requests, and set the TX-link replay timer threshold
 * to its documented default.  Called from hvio_pec_init() for
 * PX_CHIP_OBERON only.  pxu_p is unused (ARGSUSED).
 */
/* ARGSUSED */
static void
dlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    /* Writing 0 to the mask register unmasks (enables) all DLU interrupts. */
    CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, DLU_INTERRUPT_MASK));

    /* Enable virtual channel 0 in the link layer. */
    val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
    CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));

    /*
     * Enable flow-control updates for non-posted and posted requests
     * on VC0 (completion updates are deliberately left disabled, matching
     * the Fire LPU programming above).
     */
    val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
        (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);

    CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
        "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));

    /* TX-link replay timer threshold: use the default from oberon_regs.h. */
    val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
        DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);

    CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

    DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
        "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
}

/*
 * dmc_init - initialize the DMC (DMA/interrupt) block: enable all
 * core-and-block interrupts and clear both debug-select registers.
 * Common to Fire and Oberon.  pxu_p is unused (ARGSUSED).
 */
/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    /*
     * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
     */
    /* -1ull sets every enable bit; unimplemented bits read back as 0. */
    val = -1ull;
    CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
    DBG(DBG_DMC, NULL,
        "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

    /*
     * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
     */
    DBG(DBG_DMC, NULL,
        "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
        CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

    /*
     * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
     */
    val = 0x0ull;
    CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
    DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
        CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

    /*
     * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
     */
    val = 0x0ull;
    CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
    DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
        CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

/*
 * hvio_pec_init - initialize the whole PEC module: ILU, TLU, the
 * chip-specific link layer (DLU on Oberon, LPU on Fire), the DMC block,
 * and finally the PEC-level interrupt enables.
 */
void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    ilu_init(csr_base, pxu_p);
    tlu_init(csr_base, pxu_p);

    /* Link layer differs per chip: Oberon has a DLU, Fire an LPU. */
    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        dlu_init(csr_base, pxu_p);
        break;
    case PX_CHIP_FIRE:
        lpu_init(csr_base, pxu_p);
        break;
    default:
        DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        break;
    }

    dmc_init(csr_base, pxu_p);

    /*
     * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
     */
    val = -1ull;
    CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
    DBG(DBG_PEC, NULL,
        "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

    /*
     * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
     */
    DBG(DBG_PEC, NULL,
        "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Convert a TTE to physical address.
 *
 * Masks the chip-specific physical-address field out of the TTE and
 * shifts it down to a page-frame number (MMU_PAGE_SHIFT).  Returns 0
 * (after masking with 0) for an unrecognized chip type.
 */
static r_addr_t
mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
{
    uint64_t pa_mask;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        pa_mask = MMU_OBERON_PADDR_MASK;
        break;
    case PX_CHIP_FIRE:
        pa_mask = MMU_FIRE_PADDR_MASK;
        break;
    default:
        DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        pa_mask = 0;
        break;
    }
    return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
}

/*
 * Return MMU bypass noncache bit for chip.
 * The bit is OR-ed into a bypass-mode DMA address to mark the access
 * non-cacheable; 0 is returned for an unknown chip type.
 */
static r_addr_t
mmu_bypass_noncache(pxu_t *pxu_p)
{
    r_addr_t bypass_noncache_bit;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
        break;
    case PX_CHIP_FIRE:
        bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
        break;
    default:
        DBG(DBG_MMU, NULL,
            "mmu_bypass_nocache - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        bypass_noncache_bit = 0;
        break;
    }
    return (bypass_noncache_bit);
}

/*
 * Calculate number of TSB entries for the chip.
 *
 * Reads the 4-bit encoded TSB size from the low nibble of
 * MMU_TSB_CONTROL (as programmed by OBP) and decodes it with
 * MMU_TSBSIZE_TO_TSBENTRIES().
 */
/* ARGSUSED */
static uint_t
mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t tsb_ctrl;
    uint_t obp_tsb_entries, obp_tsb_size;

    tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

    /* Low 4 bits hold the encoded TSB size. */
    obp_tsb_size = tsb_ctrl & 0xF;

    obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

    return (obp_tsb_entries);
}

/*
 * Initialize the module, but do not enable interrupts.
 *
 * Zeroes the kernel TSB, copies OBP's valid TTEs into the tail of the
 * new TSB so OBP-established DVMA mappings keep working, invalidates
 * the TLB, programs the TSB Control Register, and turns the MMU on.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val, i, obp_tsb_pa, *base_tte_addr;
    uint_t obp_tsb_entries;

    bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

    /*
     * Preserve OBP's TSB.
     */
    obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;

    obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);

    /*
     * Place the copied OBP entries at the very end of the new TSB
     * (tsb_size >> 3 == number of 8-byte TTE slots).
     */
    base_tte_addr = pxu_p->tsb_vaddr +
        ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

    for (i = 0; i < obp_tsb_entries; i++) {
        uint64_t tte = lddphys(obp_tsb_pa + i * 8);

        /* Only carry over valid mappings. */
        if (!MMU_TTE_VALID(tte))
            continue;

        base_tte_addr[i] = tte;
    }

    /*
     * Invalidate the TLB through the diagnostic register.
     */
    CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

    /*
     * Configure the Fire MMU TSB Control Register.  Determine
     * the encoding for either 8KB pages (0) or 64KB pages (1).
     *
     * Write the most significant 30 bits of the TSB physical address
     * and the encoded TSB table size.
     */
    /* Find the size encoding; the empty loop body is intentional. */
    for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);

    val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
        ((MMU_PAGE_SHIFT == 13) ?
        0 : 1) << 8) | i);

    CSR_XS(csr_base, MMU_TSB_CONTROL, val);

    /*
     * Enable the MMU, set the "TSB Cache Snoop Enable",
     * the "Cache Mode", the "Bypass Enable" and
     * the "Translation Enable" bits.
     */
    val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
    val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
        | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
        | (1ull << MMU_CONTROL_AND_STATUS_BE)
        | (1ull << MMU_CONTROL_AND_STATUS_TE));

    CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

    /*
     * Read the register here to ensure that the previous writes to
     * the Fire MMU registers have been flushed.  (Technically, this
     * is not entirely necessary here as we will likely do later reads
     * during Fire initialization, but it is a small price to pay for
     * more modular code.)
     */
    (void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

    /*
     * CSR_V TLU's UE interrupt regs (log, enable, status, clear)
     * Plus header logs
     */
    DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

/*
 * hvio_iommu_map - install 'pages' TTEs starting at the TSB index
 * encoded in tsbid.  When MMU_MAP_PFN is set in flags, 'addr' is a
 * ddi_dma_impl_t whose PFN list (starting at pfn_index) supplies the
 * page frames; otherwise 'addr' is a kernel virtual address walked a
 * page at a time through hat_getpfnum().  Always returns H_EOK.
 */
/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
{
    tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
    uint64_t attr = MMU_TTE_V;
    int i;

    if (io_attr & PCI_MAP_ATTR_WRITE)
        attr |= MMU_TTE_W;

    /* The relaxed-ordering TTE bit exists on Oberon only. */
    if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
        (io_attr & PCI_MAP_ATTR_RO))
        attr |= MMU_TTE_RO;

    if (attr & MMU_TTE_RO) {
        DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
            "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
    }

    if (flags & MMU_MAP_PFN) {
        ddi_dma_impl_t *mp = (ddi_dma_impl_t *)addr;
        for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
            px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
            pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;

            /*
             * Oberon will need to flush the corresponding TTEs in
             * Cache.  We only need to flush every cache line
             * (every 8th TTE, or the last one written).
             * Extra PIO's are expensive.
             */
            if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
                if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
                    CSR_XS(dev_hdl,
                        MMU_TTE_CACHE_FLUSH_ADDRESS,
                        (pxu_p->tsb_paddr+
                        (tsb_index*MMU_TTE_SIZE)));
                }
            }
        }
    } else {
        caddr_t a = (caddr_t)addr;
        for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
            px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
            pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;

            /*
             * Oberon will need to flush the corresponding TTEs in
             * Cache.  We only need to flush every cache line.
             * Extra PIO's are expensive.
             */
            if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
                if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
                    CSR_XS(dev_hdl,
                        MMU_TTE_CACHE_FLUSH_ADDRESS,
                        (pxu_p->tsb_paddr+
                        (tsb_index*MMU_TTE_SIZE)));
                }
            }
        }
    }

    return (H_EOK);
}

/*
 * hvio_iommu_demap - invalidate 'pages' TTEs starting at the TSB index
 * encoded in tsbid.  Always returns H_EOK.
 */
/* ARGSUSED */
uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
    tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
    int i;

    for (i = 0; i < pages; i++, tsb_index++) {
        pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;

        /*
         * Oberon will need to flush the corresponding TTEs in
         * Cache.
         * We only need to flush every cache line.
         * Extra PIO's are expensive.
         */
        if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
            if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
                CSR_XS(dev_hdl,
                    MMU_TTE_CACHE_FLUSH_ADDRESS,
                    (pxu_p->tsb_paddr+
                    (tsb_index*MMU_TTE_SIZE)));
            }
        }
    }

    return (H_EOK);
}

/*
 * hvio_iommu_getmap - look up the TTE at the TSB index encoded in tsbid.
 * On a valid mapping, stores the physical page number in *r_addr_p and
 * WRITE/READ in *attr_p and returns H_EOK; otherwise zeroes both
 * out-parameters and returns H_ENOMAP.
 */
/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attr_p, r_addr_t *r_addr_p)
{
    tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
    uint64_t *tte_addr;
    uint64_t ret = H_EOK;

    tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

    if (*tte_addr & MMU_TTE_V) {
        *r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
        *attr_p = (*tte_addr & MMU_TTE_W) ?
            PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
    } else {
        *r_addr_p = 0;
        *attr_p = 0;
        ret = H_ENOMAP;
    }

    return (ret);
}

/*
 * hvio_get_bypass_base - return the chip-specific base address of the
 * MMU bypass DMA window (0 for an unknown chip type).
 */
/* ARGSUSED */
uint64_t
hvio_get_bypass_base(pxu_t *pxu_p)
{
    uint64_t base;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        base = MMU_OBERON_BYPASS_BASE;
        break;
    case PX_CHIP_FIRE:
        base = MMU_FIRE_BYPASS_BASE;
        break;
    default:
        DBG(DBG_MMU, NULL,
            "hvio_get_bypass_base - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        base = 0;
        break;
    }
    return (base);
}

/*
 * hvio_get_bypass_end - return the chip-specific end address of the
 * MMU bypass DMA window (0 for an unknown chip type).
 */
/* ARGSUSED */
uint64_t
hvio_get_bypass_end(pxu_t *pxu_p)
{
    uint64_t end;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        end = MMU_OBERON_BYPASS_END;
        break;
    case PX_CHIP_FIRE:
        end = MMU_FIRE_BYPASS_END;
        break;
    default:
        DBG(DBG_MMU, NULL,
            "hvio_get_bypass_end - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        end = 0;
        break;
    }
    return (end);
}

/*
 * hvio_iommu_getbypass - translate real address 'ra' into a bypass-mode
 * I/O address: bypass base | ra, with the noncache bit OR-ed in when the
 * page is not memory.  Always returns H_EOK.
 */
/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
    io_attributes_t attr, io_addr_t *io_addr_p)
{
    uint64_t pfn = MMU_BTOP(ra);

    *io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
        (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));

    return (H_EOK);
}

/*
 * Generic IO Interrupt Services
 */

/*
 * Converts a device specific interrupt number given by the
 * arguments devhandle and devino into a system specific ino.
 */
/* ARGSUSED */
uint64_t
hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
    sysino_t *sysino)
{
    /*
     * NOTE(review): if INTERRUPT_MAPPING_ENTRIES is the entry COUNT,
     * this looks like an off-by-one -- devino == count would be accepted
     * though valid indices are presumably 0..count-1; '>=' may be
     * intended.  Confirm against the register definition before changing.
     */
    if (devino > INTERRUPT_MAPPING_ENTRIES) {
        DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
        return (H_ENOINTR);
    }

    *sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);

    return (H_EOK);
}

/*
 * Returns state in intr_valid_state if the interrupt defined by sysino
 * is valid (enabled) or not-valid (disabled).
 */
uint64_t
hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
    /* Read the V (valid) bit of this ino's interrupt-mapping entry. */
    if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
        SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
        *intr_valid_state = INTR_VALID;
    } else {
        *intr_valid_state = INTR_NOTVALID;
    }

    return (H_EOK);
}

/*
 * Sets the 'valid' state of the interrupt defined by
 * the argument sysino to the state defined by the
 * argument intr_valid_state.
 */
uint64_t
hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
    switch (intr_valid_state) {
    case INTR_VALID:
        /* Set the V bit of the interrupt-mapping entry. */
        CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
            SYSINO_TO_DEVINO(sysino), ENTRIES_V);
        break;
    case INTR_NOTVALID:
        /* Clear the V bit of the interrupt-mapping entry. */
        CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
            SYSINO_TO_DEVINO(sysino), ENTRIES_V);
        break;
    default:
        /*
         * NOTE(review): returns EINVAL while the msiq routines below
         * return H_EINVAL for the same situation -- confirm callers
         * before unifying.
         */
        return (EINVAL);
    }

    return (H_EOK);
}

/*
 * Returns the current state of the interrupt given by the sysino
 * argument.
 */
uint64_t
hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t *intr_state)
{
    intr_state_t state;

    state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
        SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);

    /* Map the hardware INT_STATE encoding to the generic intr_state_t. */
    switch (state) {
    case INTERRUPT_IDLE_STATE:
        *intr_state = INTR_IDLE_STATE;
        break;
    case INTERRUPT_RECEIVED_STATE:
        *intr_state = INTR_RECEIVED_STATE;
        break;
    case INTERRUPT_PENDING_STATE:
        *intr_state = INTR_DELIVERED_STATE;
        break;
    default:
        return (EINVAL);
    }

    return (H_EOK);

}

/*
 * Sets the current state of the interrupt given by the sysino
 * argument to the value given in the argument intr_state.
 *
 * Note: Setting the state to INTR_IDLE clears any pending
 * interrupt for sysino.
 */
uint64_t
hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t intr_state)
{
    intr_state_t state;

    /* Only IDLE and DELIVERED may be written back; anything else fails. */
    switch (intr_state) {
    case INTR_IDLE_STATE:
        state = INTERRUPT_IDLE_STATE;
        break;
    case INTR_DELIVERED_STATE:
        state = INTERRUPT_PENDING_STATE;
        break;
    default:
        return (EINVAL);
    }

    CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
        SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);

    return (H_EOK);
}

/*
 * Returns the cpuid that is the current target of the
 * interrupt given by the sysino argument.
 *
 * The cpuid value returned is undefined if the target
 * has not been set via intr_settarget.
 */
uint64_t
hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
    cpuid_t *cpuid)
{
    /* The target-id field differs per chip: DESTID (Oberon) vs JPID (Fire). */
    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
            SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
        break;
    case PX_CHIP_FIRE:
        *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
            SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
        break;
    default:
        DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
            "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
        return (EINVAL);
    }

    return (H_EOK);
}

/*
 * Set the target cpu for the interrupt defined by the argument
 * sysino to the target cpu value defined by the argument cpuid.
 */
uint64_t
hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
    cpuid_t cpuid)
{

    uint64_t val, intr_controller;
    uint32_t ino = SYSINO_TO_DEVINO(sysino);

    /*
     * For now, we assign interrupt controller in a round
     * robin fashion.  Later, we may need to come up with
     * a more efficient assignment algorithm.
2156 */ 2157 intr_controller = 0x1ull << (cpuid % 4); 2158 2159 switch (PX_CHIP_TYPE(pxu_p)) { 2160 case PX_CHIP_OBERON: 2161 val = (((cpuid & 2162 INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) << 2163 INTERRUPT_MAPPING_ENTRIES_T_DESTID) | 2164 ((intr_controller & 2165 INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK) 2166 << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM)); 2167 break; 2168 case PX_CHIP_FIRE: 2169 val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) << 2170 INTERRUPT_MAPPING_ENTRIES_T_JPID) | 2171 ((intr_controller & 2172 INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK) 2173 << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM)); 2174 break; 2175 default: 2176 DBG(DBG_CB, NULL, "hvio_intr_settarget - " 2177 "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p)); 2178 return (EINVAL); 2179 } 2180 2181 /* For EQ interrupts, set DATA MONDO bit */ 2182 if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) && 2183 (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT))) 2184 val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE); 2185 2186 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val); 2187 2188 return (H_EOK); 2189 } 2190 2191 /* 2192 * MSIQ Functions: 2193 */ 2194 uint64_t 2195 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p) 2196 { 2197 CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0, 2198 (uint64_t)pxu_p->msiq_mapped_p); 2199 DBG(DBG_IB, NULL, 2200 "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n", 2201 CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS)); 2202 2203 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0, 2204 (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p), 2205 pxu_p->portid) << INO_BITS); 2206 DBG(DBG_IB, NULL, "hvio_msiq_init: " 2207 "INTERRUPT_MONDO_DATA_0: 0x%llx\n", 2208 CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0)); 2209 2210 return (H_EOK); 2211 } 2212 2213 uint64_t 2214 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id, 2215 pci_msiq_valid_state_t *msiq_valid_state) 2216 { 2217 uint32_t eq_state; 2218 uint64_t ret = H_EOK; 2219 2220 eq_state = 
CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2221 msiq_id, ENTRIES_STATE); 2222 2223 switch (eq_state) { 2224 case EQ_IDLE_STATE: 2225 *msiq_valid_state = PCI_MSIQ_INVALID; 2226 break; 2227 case EQ_ACTIVE_STATE: 2228 case EQ_ERROR_STATE: 2229 *msiq_valid_state = PCI_MSIQ_VALID; 2230 break; 2231 default: 2232 ret = H_EIO; 2233 break; 2234 } 2235 2236 return (ret); 2237 } 2238 2239 uint64_t 2240 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id, 2241 pci_msiq_valid_state_t msiq_valid_state) 2242 { 2243 uint64_t ret = H_EOK; 2244 2245 switch (msiq_valid_state) { 2246 case PCI_MSIQ_INVALID: 2247 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR, 2248 msiq_id, ENTRIES_DIS); 2249 break; 2250 case PCI_MSIQ_VALID: 2251 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2252 msiq_id, ENTRIES_EN); 2253 break; 2254 default: 2255 ret = H_EINVAL; 2256 break; 2257 } 2258 2259 return (ret); 2260 } 2261 2262 uint64_t 2263 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id, 2264 pci_msiq_state_t *msiq_state) 2265 { 2266 uint32_t eq_state; 2267 uint64_t ret = H_EOK; 2268 2269 eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2270 msiq_id, ENTRIES_STATE); 2271 2272 switch (eq_state) { 2273 case EQ_IDLE_STATE: 2274 case EQ_ACTIVE_STATE: 2275 *msiq_state = PCI_MSIQ_STATE_IDLE; 2276 break; 2277 case EQ_ERROR_STATE: 2278 *msiq_state = PCI_MSIQ_STATE_ERROR; 2279 break; 2280 default: 2281 ret = H_EIO; 2282 } 2283 2284 return (ret); 2285 } 2286 2287 uint64_t 2288 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id, 2289 pci_msiq_state_t msiq_state) 2290 { 2291 uint32_t eq_state; 2292 uint64_t ret = H_EOK; 2293 2294 eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2295 msiq_id, ENTRIES_STATE); 2296 2297 switch (eq_state) { 2298 case EQ_IDLE_STATE: 2299 if (msiq_state == PCI_MSIQ_STATE_ERROR) 2300 ret = H_EIO; 2301 break; 2302 case EQ_ACTIVE_STATE: 2303 if (msiq_state == PCI_MSIQ_STATE_ERROR) 2304 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2305 
msiq_id, ENTRIES_ENOVERR); 2306 else 2307 ret = H_EIO; 2308 break; 2309 case EQ_ERROR_STATE: 2310 if (msiq_state == PCI_MSIQ_STATE_IDLE) 2311 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR, 2312 msiq_id, ENTRIES_E2I); 2313 else 2314 ret = H_EIO; 2315 break; 2316 default: 2317 ret = H_EIO; 2318 } 2319 2320 return (ret); 2321 } 2322 2323 uint64_t 2324 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id, 2325 msiqhead_t *msiq_head) 2326 { 2327 *msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, 2328 msiq_id, ENTRIES_HEAD); 2329 2330 return (H_EOK); 2331 } 2332 2333 uint64_t 2334 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id, 2335 msiqhead_t msiq_head) 2336 { 2337 CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id, 2338 ENTRIES_HEAD, msiq_head); 2339 2340 return (H_EOK); 2341 } 2342 2343 uint64_t 2344 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id, 2345 msiqtail_t *msiq_tail) 2346 { 2347 *msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL, 2348 msiq_id, ENTRIES_TAIL); 2349 2350 return (H_EOK); 2351 } 2352 2353 /* 2354 * MSI Functions: 2355 */ 2356 uint64_t 2357 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64) 2358 { 2359 /* PCI MEM 32 resources to perform 32 bit MSI transactions */ 2360 CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0, 2361 ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR); 2362 DBG(DBG_IB, NULL, "hvio_msiq_init: MSI_32_BIT_ADDRESS: 0x%llx\n", 2363 CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS)); 2364 2365 /* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */ 2366 CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0, 2367 ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR); 2368 DBG(DBG_IB, NULL, "hvio_msiq_init: MSI_64_BIT_ADDRESS: 0x%llx\n", 2369 CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS)); 2370 2371 return (H_EOK); 2372 } 2373 2374 uint64_t 2375 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num, 2376 msiqid_t *msiq_id) 2377 { 2378 *msiq_id = CSRA_FR((caddr_t)dev_hdl, 
MSI_MAPPING, 2379 msi_num, ENTRIES_EQNUM); 2380 2381 return (H_EOK); 2382 } 2383 2384 uint64_t 2385 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num, 2386 msiqid_t msiq_id) 2387 { 2388 CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2389 ENTRIES_EQNUM, msiq_id); 2390 2391 return (H_EOK); 2392 } 2393 2394 uint64_t 2395 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num, 2396 pci_msi_valid_state_t *msi_valid_state) 2397 { 2398 *msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING, 2399 msi_num, ENTRIES_V); 2400 2401 return (H_EOK); 2402 } 2403 2404 uint64_t 2405 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num, 2406 pci_msi_valid_state_t msi_valid_state) 2407 { 2408 uint64_t ret = H_EOK; 2409 2410 switch (msi_valid_state) { 2411 case PCI_MSI_VALID: 2412 CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2413 ENTRIES_V); 2414 break; 2415 case PCI_MSI_INVALID: 2416 CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2417 ENTRIES_V); 2418 break; 2419 default: 2420 ret = H_EINVAL; 2421 } 2422 2423 return (ret); 2424 } 2425 2426 uint64_t 2427 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num, 2428 pci_msi_state_t *msi_state) 2429 { 2430 *msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING, 2431 msi_num, ENTRIES_EQWR_N); 2432 2433 return (H_EOK); 2434 } 2435 2436 uint64_t 2437 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num, 2438 pci_msi_state_t msi_state) 2439 { 2440 uint64_t ret = H_EOK; 2441 2442 switch (msi_state) { 2443 case PCI_MSI_STATE_IDLE: 2444 CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num, 2445 ENTRIES_EQWR_N); 2446 break; 2447 case PCI_MSI_STATE_DELIVERED: 2448 default: 2449 ret = H_EINVAL; 2450 break; 2451 } 2452 2453 return (ret); 2454 } 2455 2456 /* 2457 * MSG Functions: 2458 */ 2459 uint64_t 2460 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2461 msiqid_t *msiq_id) 2462 { 2463 uint64_t ret = H_EOK; 2464 2465 switch (msg_type) { 2466 case PCIE_PME_MSG: 2467 *msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM); 
2468 break; 2469 case PCIE_PME_ACK_MSG: 2470 *msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, 2471 EQNUM); 2472 break; 2473 case PCIE_CORR_MSG: 2474 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM); 2475 break; 2476 case PCIE_NONFATAL_MSG: 2477 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, 2478 EQNUM); 2479 break; 2480 case PCIE_FATAL_MSG: 2481 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM); 2482 break; 2483 default: 2484 ret = H_EINVAL; 2485 break; 2486 } 2487 2488 return (ret); 2489 } 2490 2491 uint64_t 2492 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2493 msiqid_t msiq_id) 2494 { 2495 uint64_t ret = H_EOK; 2496 2497 switch (msg_type) { 2498 case PCIE_PME_MSG: 2499 CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id); 2500 break; 2501 case PCIE_PME_ACK_MSG: 2502 CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id); 2503 break; 2504 case PCIE_CORR_MSG: 2505 CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id); 2506 break; 2507 case PCIE_NONFATAL_MSG: 2508 CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id); 2509 break; 2510 case PCIE_FATAL_MSG: 2511 CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id); 2512 break; 2513 default: 2514 ret = H_EINVAL; 2515 break; 2516 } 2517 2518 return (ret); 2519 } 2520 2521 uint64_t 2522 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2523 pcie_msg_valid_state_t *msg_valid_state) 2524 { 2525 uint64_t ret = H_EOK; 2526 2527 switch (msg_type) { 2528 case PCIE_PME_MSG: 2529 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2530 break; 2531 case PCIE_PME_ACK_MSG: 2532 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, 2533 PME_TO_ACK_MAPPING, V); 2534 break; 2535 case PCIE_CORR_MSG: 2536 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V); 2537 break; 2538 case PCIE_NONFATAL_MSG: 2539 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, 2540 ERR_NONFATAL_MAPPING, V); 2541 break; 2542 case PCIE_FATAL_MSG: 2543 
*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, 2544 V); 2545 break; 2546 default: 2547 ret = H_EINVAL; 2548 break; 2549 } 2550 2551 return (ret); 2552 } 2553 2554 uint64_t 2555 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2556 pcie_msg_valid_state_t msg_valid_state) 2557 { 2558 uint64_t ret = H_EOK; 2559 2560 switch (msg_valid_state) { 2561 case PCIE_MSG_VALID: 2562 switch (msg_type) { 2563 case PCIE_PME_MSG: 2564 CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2565 break; 2566 case PCIE_PME_ACK_MSG: 2567 CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V); 2568 break; 2569 case PCIE_CORR_MSG: 2570 CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V); 2571 break; 2572 case PCIE_NONFATAL_MSG: 2573 CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V); 2574 break; 2575 case PCIE_FATAL_MSG: 2576 CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V); 2577 break; 2578 default: 2579 ret = H_EINVAL; 2580 break; 2581 } 2582 2583 break; 2584 case PCIE_MSG_INVALID: 2585 switch (msg_type) { 2586 case PCIE_PME_MSG: 2587 CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2588 break; 2589 case PCIE_PME_ACK_MSG: 2590 CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V); 2591 break; 2592 case PCIE_CORR_MSG: 2593 CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V); 2594 break; 2595 case PCIE_NONFATAL_MSG: 2596 CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V); 2597 break; 2598 case PCIE_FATAL_MSG: 2599 CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V); 2600 break; 2601 default: 2602 ret = H_EINVAL; 2603 break; 2604 } 2605 break; 2606 default: 2607 ret = H_EINVAL; 2608 } 2609 2610 return (ret); 2611 } 2612 2613 /* 2614 * Suspend/Resume Functions: 2615 * (pec, mmu, ib) 2616 * cb 2617 * Registers saved have all been touched in the XXX_init functions. 
2618 */ 2619 uint64_t 2620 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2621 { 2622 uint64_t *config_state; 2623 int total_size; 2624 int i; 2625 2626 if (msiq_suspend(dev_hdl, pxu_p) != H_EOK) 2627 return (H_EIO); 2628 2629 total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE; 2630 config_state = kmem_zalloc(total_size, KM_NOSLEEP); 2631 2632 if (config_state == NULL) { 2633 return (H_EIO); 2634 } 2635 2636 /* 2637 * Soft state for suspend/resume from pxu_t 2638 * uint64_t *pec_config_state; 2639 * uint64_t *mmu_config_state; 2640 * uint64_t *ib_intr_map; 2641 * uint64_t *ib_config_state; 2642 * uint64_t *xcb_config_state; 2643 */ 2644 2645 /* Save the PEC configuration states */ 2646 pxu_p->pec_config_state = config_state; 2647 for (i = 0; i < PEC_KEYS; i++) { 2648 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) || 2649 (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) { 2650 pxu_p->pec_config_state[i] = 2651 CSR_XR((caddr_t)dev_hdl, 2652 pec_config_state_regs[i].reg); 2653 } 2654 } 2655 2656 /* Save the MMU configuration states */ 2657 pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS; 2658 for (i = 0; i < MMU_KEYS; i++) { 2659 pxu_p->mmu_config_state[i] = 2660 CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]); 2661 } 2662 2663 /* Save the interrupt mapping registers */ 2664 pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS; 2665 for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) { 2666 pxu_p->ib_intr_map[i] = 2667 CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i); 2668 } 2669 2670 /* Save the IB configuration states */ 2671 pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES; 2672 for (i = 0; i < IB_KEYS; i++) { 2673 pxu_p->ib_config_state[i] = 2674 CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]); 2675 } 2676 2677 return (H_EOK); 2678 } 2679 2680 void 2681 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p) 2682 { 2683 int total_size; 2684 sysino_t sysino; 2685 int i; 2686 2687 /* Make sure 
that suspend actually did occur */ 2688 if (!pxu_p->pec_config_state) { 2689 return; 2690 } 2691 2692 /* Restore IB configuration states */ 2693 for (i = 0; i < IB_KEYS; i++) { 2694 CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i], 2695 pxu_p->ib_config_state[i]); 2696 } 2697 2698 /* 2699 * Restore the interrupt mapping registers 2700 * And make sure the intrs are idle. 2701 */ 2702 for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) { 2703 CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i, 2704 ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE); 2705 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i, 2706 pxu_p->ib_intr_map[i]); 2707 } 2708 2709 /* Restore MMU configuration states */ 2710 /* Clear the cache. */ 2711 CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull); 2712 2713 for (i = 0; i < MMU_KEYS; i++) { 2714 CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i], 2715 pxu_p->mmu_config_state[i]); 2716 } 2717 2718 /* Restore PEC configuration states */ 2719 /* Make sure all reset bits are low until error is detected */ 2720 CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull); 2721 2722 for (i = 0; i < PEC_KEYS; i++) { 2723 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) || 2724 (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) { 2725 CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg, 2726 pxu_p->pec_config_state[i]); 2727 } 2728 } 2729 2730 /* Enable PCI-E interrupt */ 2731 (void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino); 2732 2733 (void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE); 2734 2735 total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE; 2736 kmem_free(pxu_p->pec_config_state, total_size); 2737 2738 pxu_p->pec_config_state = NULL; 2739 pxu_p->mmu_config_state = NULL; 2740 pxu_p->ib_config_state = NULL; 2741 pxu_p->ib_intr_map = NULL; 2742 2743 msiq_resume(dev_hdl, pxu_p); 2744 } 2745 2746 uint64_t 2747 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2748 { 2749 uint64_t *config_state, *cb_regs; 2750 int i, cb_size, cb_keys; 
2751 2752 switch (PX_CHIP_TYPE(pxu_p)) { 2753 case PX_CHIP_OBERON: 2754 cb_size = UBC_SIZE; 2755 cb_keys = UBC_KEYS; 2756 cb_regs = ubc_config_state_regs; 2757 break; 2758 case PX_CHIP_FIRE: 2759 cb_size = JBC_SIZE; 2760 cb_keys = JBC_KEYS; 2761 cb_regs = jbc_config_state_regs; 2762 break; 2763 default: 2764 DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n", 2765 PX_CHIP_TYPE(pxu_p)); 2766 break; 2767 } 2768 2769 config_state = kmem_zalloc(cb_size, KM_NOSLEEP); 2770 2771 if (config_state == NULL) { 2772 return (H_EIO); 2773 } 2774 2775 /* Save the configuration states */ 2776 pxu_p->xcb_config_state = config_state; 2777 for (i = 0; i < cb_keys; i++) { 2778 pxu_p->xcb_config_state[i] = 2779 CSR_XR((caddr_t)dev_hdl, cb_regs[i]); 2780 } 2781 2782 return (H_EOK); 2783 } 2784 2785 void 2786 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl, 2787 devino_t devino, pxu_t *pxu_p) 2788 { 2789 sysino_t sysino; 2790 uint64_t *cb_regs; 2791 int i, cb_size, cb_keys; 2792 2793 switch (PX_CHIP_TYPE(pxu_p)) { 2794 case PX_CHIP_OBERON: 2795 cb_size = UBC_SIZE; 2796 cb_keys = UBC_KEYS; 2797 cb_regs = ubc_config_state_regs; 2798 /* 2799 * No reason to have any reset bits high until an error is 2800 * detected on the link. 2801 */ 2802 CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull); 2803 break; 2804 case PX_CHIP_FIRE: 2805 cb_size = JBC_SIZE; 2806 cb_keys = JBC_KEYS; 2807 cb_regs = jbc_config_state_regs; 2808 /* 2809 * No reason to have any reset bits high until an error is 2810 * detected on the link. 
2811 */ 2812 CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull); 2813 break; 2814 default: 2815 DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n", 2816 PX_CHIP_TYPE(pxu_p)); 2817 break; 2818 } 2819 2820 ASSERT(pxu_p->xcb_config_state); 2821 2822 /* Restore the configuration states */ 2823 for (i = 0; i < cb_keys; i++) { 2824 CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i], 2825 pxu_p->xcb_config_state[i]); 2826 } 2827 2828 /* Enable XBC interrupt */ 2829 (void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino); 2830 2831 (void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE); 2832 2833 kmem_free(pxu_p->xcb_config_state, cb_size); 2834 2835 pxu_p->xcb_config_state = NULL; 2836 } 2837 2838 static uint64_t 2839 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2840 { 2841 size_t bufsz; 2842 volatile uint64_t *cur_p; 2843 int i; 2844 2845 bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE; 2846 if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) == 2847 NULL) 2848 return (H_EIO); 2849 2850 cur_p = pxu_p->msiq_config_state; 2851 2852 /* Save each EQ state */ 2853 for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) 2854 *cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i); 2855 2856 /* Save MSI mapping registers */ 2857 for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++) 2858 *cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i); 2859 2860 /* Save all other MSIQ registers */ 2861 for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++) 2862 *cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]); 2863 return (H_EOK); 2864 } 2865 2866 static void 2867 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p) 2868 { 2869 size_t bufsz; 2870 uint64_t *cur_p, state; 2871 int i; 2872 2873 bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE; 2874 cur_p = pxu_p->msiq_config_state; 2875 /* 2876 * Initialize EQ base address register and 2877 * Interrupt Mondo Data 0 register. 
2878 */ 2879 (void) hvio_msiq_init(dev_hdl, pxu_p); 2880 2881 /* Restore EQ states */ 2882 for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) { 2883 state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK; 2884 if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE)) 2885 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2886 i, ENTRIES_EN); 2887 } 2888 2889 /* Restore MSI mapping */ 2890 for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++) 2891 CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p); 2892 2893 /* 2894 * Restore all other registers. MSI 32 bit address and 2895 * MSI 64 bit address are restored as part of this. 2896 */ 2897 for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++) 2898 CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p); 2899 2900 kmem_free(pxu_p->msiq_config_state, bufsz); 2901 pxu_p->msiq_config_state = NULL; 2902 } 2903 2904 /* 2905 * sends PME_Turn_Off message to put the link in L2/L3 ready state. 2906 * called by px_goto_l23ready. 2907 * returns DDI_SUCCESS or DDI_FAILURE 2908 */ 2909 int 2910 px_send_pme_turnoff(caddr_t csr_base) 2911 { 2912 volatile uint64_t reg; 2913 2914 reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE); 2915 /* If already pending, return failure */ 2916 if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) { 2917 DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit " 2918 "tlu_pme_turn_off_generate = %x\n", reg); 2919 return (DDI_FAILURE); 2920 } 2921 2922 /* write to PME_Turn_off reg to boradcast */ 2923 reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO); 2924 CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg); 2925 2926 return (DDI_SUCCESS); 2927 } 2928 2929 /* 2930 * Checks for link being in L1idle state. 
2931 * Returns 2932 * DDI_SUCCESS - if the link is in L1idle 2933 * DDI_FAILURE - if the link is not in L1idle 2934 */ 2935 int 2936 px_link_wait4l1idle(caddr_t csr_base) 2937 { 2938 uint8_t ltssm_state; 2939 int ntries = px_max_l1_tries; 2940 2941 while (ntries > 0) { 2942 ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE); 2943 if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0)) 2944 break; 2945 delay(1); 2946 } 2947 DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state); 2948 return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE); 2949 } 2950 2951 /* 2952 * Tranisition the link to L0, after it is down. 2953 */ 2954 int 2955 px_link_retrain(caddr_t csr_base) 2956 { 2957 volatile uint64_t reg; 2958 2959 reg = CSR_XR(csr_base, TLU_CONTROL); 2960 if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) { 2961 DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n"); 2962 return (DDI_FAILURE); 2963 } 2964 2965 /* Clear link down bit in TLU Other Event Clear Status Register. 
*/ 2966 CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P); 2967 2968 /* Clear Drain bit in TLU Status Register */ 2969 CSR_BS(csr_base, TLU_STATUS, DRAIN); 2970 2971 /* Clear Remain in Detect.Quiet bit in TLU Control Register */ 2972 reg = CSR_XR(csr_base, TLU_CONTROL); 2973 reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET); 2974 CSR_XS(csr_base, TLU_CONTROL, reg); 2975 2976 return (DDI_SUCCESS); 2977 } 2978 2979 void 2980 px_enable_detect_quiet(caddr_t csr_base) 2981 { 2982 volatile uint64_t tlu_ctrl; 2983 2984 tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL); 2985 tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET); 2986 CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl); 2987 } 2988 2989 static uint_t 2990 oberon_hp_pwron(caddr_t csr_base) 2991 { 2992 volatile uint64_t reg; 2993 boolean_t link_retrain, link_up; 2994 int i; 2995 2996 DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n"); 2997 2998 /* Check Leaf Reset status */ 2999 reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE); 3000 if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) { 3001 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n"); 3002 goto fail; 3003 } 3004 3005 /* Check Slot status */ 3006 reg = CSR_XR(csr_base, TLU_SLOT_STATUS); 3007 if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) || 3008 (reg & (1ull << TLU_SLOT_STATUS_MRLS))) { 3009 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n", 3010 reg); 3011 goto fail; 3012 } 3013 3014 /* Blink power LED, this is done from pciehpc already */ 3015 3016 /* Turn on slot power */ 3017 CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN); 3018 3019 /* power fault detection */ 3020 delay(drv_usectohz(25000)); 3021 CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3022 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN); 3023 3024 /* wait to check power state */ 3025 delay(drv_usectohz(25000)); 3026 3027 if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) { 3028 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n"); 3029 goto fail1; 3030 } 3031 3032 /* power is good */ 3033 CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN); 
3034 3035 delay(drv_usectohz(25000)); 3036 CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3037 CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN); 3038 3039 /* Turn on slot clock */ 3040 CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN); 3041 3042 /* Release PCI-E Reset */ 3043 delay(drv_usectohz(100000)); 3044 CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST); 3045 3046 /* 3047 * Open events' mask 3048 * This should be done from pciehpc already 3049 */ 3050 3051 /* 3052 * Initialize Leaf 3053 * SPLS = 00b, SPLV = 11001b, i.e. 25W 3054 */ 3055 reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES); 3056 reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK << 3057 TLU_SLOT_CAPABILITIES_SPLS); 3058 reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK << 3059 TLU_SLOT_CAPABILITIES_SPLS); 3060 reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLS); 3061 CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg); 3062 3063 /* Enable PCIE port */ 3064 CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS); 3065 CSR_BC(csr_base, FLP_PORT_CONTROL, PORT_DIS); 3066 3067 /* wait for the link up */ 3068 link_up = B_FALSE; 3069 link_retrain = B_TRUE; 3070 for (i = 0; (i < 2) && (link_up == B_FALSE); i++) { 3071 delay(drv_usectohz(100000)); 3072 reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS); 3073 3074 if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) & 3075 DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) == 3076 DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) && 3077 (reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) && 3078 ((reg & DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK) == 3079 DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) { 3080 DBG(DBG_HP, NULL, "oberon_hp_pwron : link is up\n"); 3081 link_up = B_TRUE; 3082 } else if (link_retrain == B_TRUE) { 3083 DBG(DBG_HP, NULL, "oberon_hp_pwron: retrain link\n"); 3084 /* retrain the link */ 3085 CSR_BS(csr_base, FLP_PORT_LINK_CONTROL, RETRAIN); 3086 link_retrain = B_FALSE; 3087 } 3088 } 3089 3090 if (link_up == B_FALSE) { 3091 DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable " 3092 "PCI-E port\n"); 3093 goto fail2; 3094 } 
3095 3096 /* link is up */ 3097 CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR); 3098 CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P); 3099 CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S); 3100 CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS); 3101 3102 /* Turn on Power LED */ 3103 reg = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3104 reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK; 3105 reg = pcie_slotctl_pwr_indicator_set(reg, 3106 PCIE_SLOTCTL_INDICATOR_STATE_ON); 3107 CSR_XS(csr_base, TLU_SLOT_CONTROL, reg); 3108 3109 /* Notify to SCF */ 3110 if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON)) 3111 CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON); 3112 else 3113 CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON); 3114 3115 return (DDI_SUCCESS); 3116 3117 fail2: 3118 /* Link up is failed */ 3119 CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS); 3120 CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST); 3121 delay(drv_usectohz(150)); 3122 3123 CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN); 3124 delay(drv_usectohz(100)); 3125 3126 fail1: 3127 CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN); 3128 3129 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN); 3130 3131 reg = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3132 reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK; 3133 reg = pcie_slotctl_pwr_indicator_set(reg, 3134 PCIE_SLOTCTL_INDICATOR_STATE_OFF); 3135 CSR_XS(csr_base, TLU_SLOT_CONTROL, reg); 3136 3137 CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD); 3138 3139 fail: 3140 return (DDI_FAILURE); 3141 } 3142 3143 static uint_t 3144 oberon_hp_pwroff(caddr_t csr_base) 3145 { 3146 volatile uint64_t reg; 3147 volatile uint64_t reg_tluue, reg_tluce; 3148 3149 DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n"); 3150 3151 /* Blink power LED, this is done from pciehpc already */ 3152 3153 /* Clear Slot Event */ 3154 CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC); 3155 CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3156 3157 /* DRN_TR_DIS on */ 3158 CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS); 3159 delay(drv_usectohz(10000)); 3160 3161 /* Save the TLU 
registers */ 3162 reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE); 3163 reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE); 3164 /* All clear */ 3165 CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0); 3166 CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0); 3167 3168 /* Disable port */ 3169 CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS); 3170 3171 /* PCIE reset */ 3172 delay(drv_usectohz(10000)); 3173 CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST); 3174 3175 /* PCIE clock stop */ 3176 delay(drv_usectohz(150)); 3177 CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN); 3178 3179 /* Turn off slot power */ 3180 delay(drv_usectohz(100)); 3181 CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN); 3182 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN); 3183 delay(drv_usectohz(25000)); 3184 CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3185 3186 /* write 0 to bit 7 of ILU Error Log Enable Register */ 3187 CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3); 3188 3189 /* Set back TLU registers */ 3190 CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue); 3191 CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce); 3192 3193 /* Power LED off */ 3194 reg = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3195 reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK; 3196 reg = pcie_slotctl_pwr_indicator_set(reg, 3197 PCIE_SLOTCTL_INDICATOR_STATE_OFF); 3198 CSR_XS(csr_base, TLU_SLOT_CONTROL, reg); 3199 3200 /* Indicator LED blink */ 3201 reg = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3202 reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK; 3203 reg = pcie_slotctl_attn_indicator_set(reg, 3204 PCIE_SLOTCTL_INDICATOR_STATE_BLINK); 3205 CSR_XS(csr_base, TLU_SLOT_CONTROL, reg); 3206 3207 /* Notify to SCF */ 3208 if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON)) 3209 CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON); 3210 else 3211 CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON); 3212 3213 /* Indicator LED off */ 3214 reg = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3215 reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK; 3216 reg = 
pcie_slotctl_attn_indicator_set(reg, 3217 PCIE_SLOTCTL_INDICATOR_STATE_OFF); 3218 CSR_XS(csr_base, TLU_SLOT_CONTROL, reg); 3219 3220 return (DDI_SUCCESS); 3221 } 3222 3223 static uint_t 3224 oberon_hpreg_get(void *cookie, off_t off) 3225 { 3226 caddr_t csr_base = *(caddr_t *)cookie; 3227 volatile uint64_t val = -1ull; 3228 3229 switch (off) { 3230 case PCIE_SLOTCAP: 3231 val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES); 3232 break; 3233 case PCIE_SLOTCTL: 3234 val = CSR_XR(csr_base, TLU_SLOT_CONTROL); 3235 3236 /* Get the power state */ 3237 val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) & 3238 (1ull << HOTPLUG_CONTROL_PWREN)) ? 3239 0 : PCIE_SLOTCTL_PWR_CONTROL; 3240 break; 3241 case PCIE_SLOTSTS: 3242 val = CSR_XR(csr_base, TLU_SLOT_STATUS); 3243 break; 3244 case PCIE_LINKCAP: 3245 val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES); 3246 break; 3247 case PCIE_LINKSTS: 3248 val = CSR_XR(csr_base, TLU_LINK_STATUS); 3249 break; 3250 default: 3251 DBG(DBG_HP, NULL, "oberon_hpreg_get(): " 3252 "unsupported offset 0x%lx\n", off); 3253 break; 3254 } 3255 3256 return ((uint_t)val); 3257 } 3258 3259 static uint_t 3260 oberon_hpreg_put(void *cookie, off_t off, uint_t val) 3261 { 3262 caddr_t csr_base = *(caddr_t *)cookie; 3263 volatile uint64_t pwr_state_on, pwr_fault; 3264 uint_t pwr_off, ret = DDI_SUCCESS; 3265 3266 DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n", 3267 off, oberon_hpreg_get(cookie, off), val); 3268 3269 switch (off) { 3270 case PCIE_SLOTCTL: 3271 /* 3272 * Depending on the current state, insertion or removal 3273 * will go through their respective sequences. 
3274 */ 3275 pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN); 3276 pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL; 3277 3278 if (!pwr_off && !pwr_state_on) 3279 ret = oberon_hp_pwron(csr_base); 3280 else if (pwr_off && pwr_state_on) { 3281 pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) & 3282 (1ull << TLU_SLOT_STATUS_PWFD); 3283 3284 if (pwr_fault) { 3285 DBG(DBG_HP, NULL, "oberon_hpreg_put: power " 3286 "off because of power fault\n"); 3287 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN); 3288 } 3289 else 3290 ret = oberon_hp_pwroff(csr_base); 3291 } else 3292 CSR_XS(csr_base, TLU_SLOT_CONTROL, val); 3293 break; 3294 case PCIE_SLOTSTS: 3295 CSR_XS(csr_base, TLU_SLOT_STATUS, val); 3296 break; 3297 default: 3298 DBG(DBG_HP, NULL, "oberon_hpreg_put(): " 3299 "unsupported offset 0x%lx\n", off); 3300 ret = DDI_FAILURE; 3301 break; 3302 } 3303 3304 return (ret); 3305 } 3306 3307 int 3308 hvio_hotplug_init(dev_info_t *dip, void *arg) 3309 { 3310 pciehpc_regops_t *regops = (pciehpc_regops_t *)arg; 3311 px_t *px_p = DIP_TO_STATE(dip); 3312 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 3313 3314 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 3315 if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR], 3316 TLU_SLOT_CAPABILITIES, HP)) { 3317 DBG(DBG_HP, NULL, "%s%d: hotplug capabale not set\n", 3318 ddi_driver_name(dip), ddi_get_instance(dip)); 3319 return (DDI_FAILURE); 3320 } 3321 3322 regops->get = oberon_hpreg_get; 3323 regops->put = oberon_hpreg_put; 3324 3325 /* cookie is the csr_base */ 3326 regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR]; 3327 3328 return (DDI_SUCCESS); 3329 } 3330 3331 return (DDI_ENOTSUP); 3332 } 3333 3334 int 3335 hvio_hotplug_uninit(dev_info_t *dip) 3336 { 3337 px_t *px_p = DIP_TO_STATE(dip); 3338 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 3339 3340 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 3341 return (DDI_SUCCESS); 3342 3343 return (DDI_FAILURE); 3344 } 3345