/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "oberon_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"
#include "px_err.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume.
 *
 * This array is in (reg, chip) form: PX_CHIP_UNIDENTIFIED marks a
 * register present on all chips, PX_CHIP_FIRE one that is Fire only,
 * and PX_CHIP_OBERON one that is Oberon only.
 */
static struct px_pec_regs {
    uint64_t reg;
    uint64_t chip;
} pec_config_state_regs[] = {
    {PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
    {ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
    {TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
    {TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
    {TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
    {TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
    {DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
    {DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
    {LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_RESET, PX_CHIP_FIRE},
    {LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
    {LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
    {LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
    {LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
    {LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
    {LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
    {LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
    {LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
    {LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
    {LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
    {LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
    {LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
    {DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
    {DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
    {DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

#define PEC_KEYS \
    ((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))

#define PEC_SIZE (PEC_KEYS * sizeof (uint64_t))
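/*
 * For illustration only: a suspend path could snapshot these PEC
 * registers with a walk of the following shape, skipping entries whose
 * chip tag does not match (hypothetical sketch -- the driver's real
 * suspend/resume routines and their buffer layout live elsewhere):
 *
 *	uint64_t buf[PEC_KEYS];
 *
 *	for (i = 0; i < PEC_KEYS; i++) {
 *		uint64_t chip = pec_config_state_regs[i].chip;
 *
 *		if (chip == PX_CHIP_UNIDENTIFIED ||
 *		    chip == PX_CHIP_TYPE(pxu_p))
 *			buf[i] = CSR_XR(csr_base,
 *			    pec_config_state_regs[i].reg);
 *	}
 *
 * Resume would replay the saved values through CSR_XS(), forcing
 * LPU_RESET to 0ull as noted above.
 */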
/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
    MMU_TSB_CONTROL,
    MMU_CONTROL_AND_STATUS,
    MMU_ERROR_LOG_ENABLE,
    MMU_INTERRUPT_ENABLE
};
#define MMU_SIZE (sizeof (mmu_config_state_regs))
#define MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
    IMU_ERROR_LOG_ENABLE,
    IMU_INTERRUPT_ENABLE
};
#define IB_SIZE (sizeof (ib_config_state_regs))
#define IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the JBC module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t jbc_config_state_regs[] = {
    JBUS_PARITY_CONTROL,
    JBC_FATAL_RESET_ENABLE,
    JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
    JBC_ERROR_LOG_ENABLE,
    JBC_INTERRUPT_ENABLE
};
#define JBC_SIZE (sizeof (jbc_config_state_regs))
#define JBC_KEYS (JBC_SIZE / sizeof (uint64_t))

/*
 * Registers for the UBC module.
 * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t ubc_config_state_regs[] = {
    UBC_ERROR_LOG_ENABLE,
    UBC_INTERRUPT_ENABLE
};
#define UBC_SIZE (sizeof (ubc_config_state_regs))
#define UBC_KEYS (UBC_SIZE / sizeof (uint64_t))

static uint64_t msiq_config_other_regs[] = {
    ERR_COR_MAPPING,
    ERR_NONFATAL_MAPPING,
    ERR_FATAL_MAPPING,
    PM_PME_MAPPING,
    PME_TO_ACK_MAPPING,
    MSI_32_BIT_ADDRESS,
    MSI_64_BIT_ADDRESS
};
#define MSIQ_OTHER_SIZE (sizeof (msiq_config_other_regs))
#define MSIQ_OTHER_KEYS (MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define MSIQ_STATE_SIZE (EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define MSIQ_MAPPING_SIZE (MSI_MAPPING_ENTRIES * sizeof (uint64_t))

/* OPL tuning variables for the link instability issue */
int wait_perst = 5000000;	/* step 9, default: 5s */
int wait_enable_port = 30000;	/* step 11, default: 30ms */
int link_retry_count = 2;	/* step 11, default: 2 */
int link_status_check = 400000;	/* step 11, default: 400ms */

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);

extern int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
extern int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];

/*
 * Initialize the bus, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        ubc_init(xbc_csr_base, pxu_p);
        break;
    case PX_CHIP_FIRE:
        jbc_init(xbc_csr_base, pxu_p);
        break;
    default:
        DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        break;
    }
}
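/*
 * A note on the CSR accessor macros used throughout this file (defined
 * in px_csr.h): as best inferred from their use here, CSR_XR/CSR_XS
 * read and write an entire 64-bit CSR by name, CSR_FR/CSR_FS read and
 * write a named bit field within a CSR, and the CSRA_* forms (CSRA_BR,
 * CSRA_BS, CSRA_BC, CSRA_FR, CSRA_FS) do the same for one entry of a
 * CSR array, taking the entry index as an extra argument; the trailing
 * letter is R for read, S for set, and C for clear.
 */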
/*
 * Initialize the JBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    /* Check if we need to enable inverted parity */
    val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
    CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
    DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

    val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
        (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
    CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
    DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

    /*
     * Enable merge, jbc and dmc interrupts.
     */
    CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
    DBG(DBG_CB, NULL,
        "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

    /*
     * CSR_V JBC's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

    DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

    DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

    DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the UBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
    /*
     * Enable Uranus bus error log bits.
     */
    CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
    DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

    /*
     * Clear Uranus bus errors.
     */
    CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
    DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

    /*
     * CSR_V UBC's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

    DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

    DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

    DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}
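/*
 * Note that the *_ERROR_STATUS_CLEAR registers written above behave as
 * write-one-to-clear: storing -1ull acknowledges every logged error bit
 * in one shot, which is also why the table comments earlier call for
 * JBC_ERROR_STATUS_CLEAR and UBC_ERROR_STATUS_CLEAR to be cleared with
 * -1ull on resume.
 */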
/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
    /*
     * CSR_V IB's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

    DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

    DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

    DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    /*
     * CSR_V ILU's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

    DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

    DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

    DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}
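/*
 * hvio_ib_init() and ilu_init() above issue no writes at all: every
 * CSR_V line is a read-and-log check of whatever OBP or hardware left
 * behind, which is the pattern the "Expect ..." annotations throughout
 * the routines below refer to as well.
 */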
/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    /*
     * CSR_V TLU_CONTROL Expect OBP ???
     */

    /*
     * L0s entry default timer value - 7.0 us
     * Completion timeout select default value - 67.1 ms and
     * OBP will set this value.
     *
     * Configuration - Bit 0 should always be 0 for upstream port.
     * Bit 1 is clock - how is this related to the clock bit in TLU
     * Link Control register? Both are hardware dependent and likely
     * set by OBP.
     *
     * NOTE: Do not set the NPWR_EN bit. The desired value of this bit
     * will be set by OBP.
     */
    val = CSR_XR(csr_base, TLU_CONTROL);
    val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
        TLU_CONTROL_CONFIG_DEFAULT;

    /*
     * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
     * behind non-posted PIO writes. This blocking could cause a master or
     * slave timeout on the host bus if multiple serialized PIOs were to
     * suffer Completion Timeouts because the CTO delays for each PIO ahead
     * of the read would accumulate. Since the Olympus processor can have
     * only 1 PIO outstanding, there is no possibility of PIO accesses from
     * a given CPU to a given device being re-ordered by the PCIe fabric;
     * therefore turning off serialization should be safe from a PCIe
     * ordering perspective.
     */
    if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
        val &= ~(1ull << TLU_CONTROL_NPWR_EN);

    /*
     * Set Detect.Quiet. This disables automatic link re-training if the
     * link goes down, e.g. when power management turns off power to the
     * downstream device. It allows Fire to enter the Drain state after a
     * link down; the Drain state forces a reset of the FC state machine,
     * which is required for proper link re-training.
     */
    val |= (1ull << TLU_REMAIN_DETECT_QUIET);
    CSR_XS(csr_base, TLU_CONTROL, val);
    DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, TLU_CONTROL));

    /*
     * CSR_V TLU_STATUS Expect HW 0x4
     */

    /*
     * Only bits [7:0] are currently defined. Bits [2:0]
     * are the state, which should likely be in state active,
     * 100b. Bit three is 'recovery', which is not understood.
     * All other bits are reserved.
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_STATUS));

    /*
     * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
        CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

    /*
     * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
     */

    /*
     * Ingress credits initial register. Bits [39:32] should be
     * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
     * be 0xC0. These are the reset values, and should be set by
     * HW.
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
        CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

    /*
     * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
     */

    /*
     * Diagnostic register - always zero unless we are debugging.
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
        CSR_XR(csr_base, TLU_DIAGNOSTIC));

    /*
     * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
        CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

    /*
     * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
        CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

    /*
     * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
        CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

    /*
     * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
        CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

    /*
     * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
        CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

    /*
     * CSR_V TLU's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

    /*
     * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

    /*
     * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));
    /*
     * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

    /*
     * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

    /*
     * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
        CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

    /*
     * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
        CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

    /*
     * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
        CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

    /*
     * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
        CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

    /*
     * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
        CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

    /*
     * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
        CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

    /*
     * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
        CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

    /*
     * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
     */

    /*
     * Bits [14:12] are the Max Read Request Size, which is always 64
     * bytes, encoded as 000b. Bits [7:5] are the Max Payload Size,
     * which starts at 128 bytes, encoded as 000b. This may be
     * revisited if init_child finds greater values.
     */
    val = 0x0ull;
    CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, TLU_DEVICE_CONTROL));

    /*
     * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_DEVICE_STATUS));

    /*
     * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
        CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

    /*
     * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
     */

    /*
     * The CLOCK bit should be set by OBP if the hardware dictates,
     * and if it is set then ASPM should be used, since L0s exit
     * latency should then be lower than L1 exit latency.
     *
     * Note that we will not enable power management during bringup
     * since it has not been tested and is creating some problems in
     * simulation.
     */
    val = (1ull << TLU_LINK_CONTROL_CLOCK);

    CSR_XS(csr_base, TLU_LINK_CONTROL, val);
    DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, TLU_LINK_CONTROL));
    /*
     * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
     */

    /*
     * Not sure if HW or OBP will be setting this read only
     * register. Bit 12 is Clock, and it should always be 1,
     * signifying that the component uses the same physical
     * clock as the platform. Bits [9:4] are for the width,
     * with the expected value above signifying a x1 width.
     * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
     * the only speed as yet supported by the PCI-E spec.
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_LINK_STATUS));

    /*
     * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
     */

    /*
     * Power Limits for the slots. Will be platform
     * dependent, and OBP will need to set after consulting
     * with the HW guys.
     *
     * Bits [16:15] are the power limit scale, which most likely
     * will be 0b, signifying 1x. Bits [14:7] are the Set
     * Power Limit Value, a number that is multiplied
     * by the power limit scale to get the actual power limit.
     */
    DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
        CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

    /*
     * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

    /*
     * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
     * Kernel 0x17F0110017F011
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

    /*
     * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

    /*
     * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

    /*
     * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

    /*
     * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

    /*
     * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

    /*
     * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
        CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));


    /*
     * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
     * Plus header logs
     */

    /*
     * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

    /*
     * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));
    /*
     * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

    /*
     * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
     */
    DBG(DBG_TLU, NULL,
        "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    /* Variables used to set the ACKNAK Latency Timer and Replay Timer */
    int link_width, max_payload;

    uint64_t val;

    /*
     * Get the Link Width. See table above LINK_WIDTH_ARR_SIZE #define
     * Only Link Widths of x1, x4, and x8 are supported.
     * If any other width is reported, default to x1.
     */
    link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
    DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

    /*
     * Convert link_width to match timer array configuration.
     */
    switch (link_width) {
    case 1:
        link_width = 0;
        break;
    case 4:
        link_width = 1;
        break;
    case 8:
        link_width = 2;
        break;
    case 16:
        link_width = 3;
        break;
    default:
        link_width = 0;
    }

    /*
     * Get the Max Payload Size.
     * See table above LINK_MAX_PKT_ARR_SIZE #define
     */
    max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
        TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

    DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
        (0x80 << max_payload));

    /* Make sure the packet size is not greater than 4096 */
    max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
        (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
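    /*
     * Worked example of the indexing set up above: a x8 link selects
     * column 2, and a CONFIG max-payload encoding of 2 means
     * 0x80 << 2 = 512 bytes, selecting row 2, so the ACKNAK and replay
     * thresholds programmed below would come from
     * px_acknak_timer_table[2][2] and px_replay_timer_table[2][2].
     */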
    /*
     * CSR_V LPU_ID Expect HW 0x0
     */

    /*
     * This register has link id, phy id and gigablaze id.
     * Should be set by HW.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
        CSR_XR(csr_base, LPU_ID));

    /*
     * CSR_V LPU_RESET Expect Kernel 0x0
     */

    /*
     * No reason to have any reset bits high until an error is
     * detected on the link.
     */
    val = 0ull;
    CSR_XS(csr_base, LPU_RESET, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
        CSR_XR(csr_base, LPU_RESET));

    /*
     * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
     */

    /*
     * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
     * They are read-only. What do the 8 bits mean, and
     * how do they get set if they are read only?
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_DEBUG_STATUS));

    /*
     * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, LPU_DEBUG_CONFIG));

    /*
     * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONTROL));

    /*
     * CSR_V LPU_LINK_STATUS Expect HW 0x101
     */

    /*
     * This register has bits [9:4] for link width, and the
     * default 0x10 means a width of x16. The problem is
     * this width is not supported according to the TLU
     * link status register.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_STATUS));

    /*
     * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

    /*
     * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_INTERRUPT_MASK));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

    /*
     * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

    /*
     * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
     */

    /*
     * This is another place where Max Payload can be set,
     * this time for the link layer. It will be set to
     * 128B, which is the default, but this will need to
     * be revisited.
     */
    val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
    CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

    /*
     * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
     */

    /*
     * Another R/W status register. Bit 3, DL up Status, will
     * be set high. The link state machine status bits [2:0]
     * are set to 0x1, but the status bits are not defined in the
     * PRM. What does 0x1 mean, what other values are possible,
     * and what are their meanings?
     *
     * This register has been giving us problems in simulation.
     * It has been mentioned that software should not program
     * any registers with WE bits except during debug. So
     * this register will no longer be programmed.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

    /*
     * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU Link Layer interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
     */

    /*
     * The PRM says that only the first two bits will be set
     * high by default, which will enable flow control for
     * posted and non-posted updates, but NOT completion
     * updates.
     */
    val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
        (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
    CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

    /*
     * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
     * Expect OBP 0x1D4C
     */

    /*
     * This should be set by OBP. We'll check to make sure.
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

    /*
     * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
     */

    /*
     * This register has Flow Control Update Timer values for
     * non-posted and posted requests, bits [30:16] and bits
     * [14:0], respectively. These are read-only to SW so
     * either HW or OBP needs to set them.
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

    /*
     * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
     */

    /*
     * Same as the timer0 register above, except that bits [14:0]
     * have the timer values for completions. Read-only to
     * SW; OBP or HW need to set it.
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

    /*
     * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
     */
    val = px_acknak_timer_table[max_payload][link_width];
    CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

    /*
     * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

    /*
     * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
     */
    val = px_replay_timer_table[max_payload][link_width];
    CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

    /*
     * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

    /*
     * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

    /*
     * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
        CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

    /*
     * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
     */
    val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
        LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
        (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
        LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

    CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

    /*
     * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

    /*
     * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

    /*
     * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
     */

    /*
     * Test only register. Will not be programmed.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
     */

    /*
     * Test only register. Will not be programmed.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

    /*
     * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

    /*
     * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
     */

    /*
     * Test only register. Will not be programmed.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

    /*
     * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

    /*
     * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

    /*
     * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

    /*
     * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

    /*
     * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

    /*
     * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
     */

    /*
     * Test only register. Will not be programmed.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
     */

    /*
     * Test only register. Will not be programmed.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

    /*
     * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
     */

    /*
     * Test only register.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
        CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

    /*
     * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
        CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

    /*
     * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
     */

    /*
     * Test only register.
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
        CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

    /*
     * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
     */

    /*
     * Test only register.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
        CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

    /*
     * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
        CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

    /*
     * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

    /*
     * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU PHY LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
     */

    /*
     * This also needs some explanation. What is the best value
     * for the water mark? Test mode enables which test mode?
     * Programming model needed for the Receiver Reset Lane N
     * bits.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

    /*
     * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

    /*
     * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

    /*
     * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

    /*
     * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU RX LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

    /*
     * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));
    /*
     * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU TX LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
        CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

    /*
     * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
     */

    /*
     * The new PRM has values for LTSSM 8 ns timeout value and
     * LTSSM 20 ns timeout value. But what do these values mean?
     * Most of the other bits are questions as well.
     *
     * As such we will use the reset value.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

    /*
     * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
     */

    /*
     * Again, what does '12 ms timeout value' mean?
     */
    val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
        LPU_LTSSM_CONFIG2_LTSSM_12_TO);
    CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

    /*
     * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
     */
    val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
        LPU_LTSSM_CONFIG3_LTSSM_2_TO);
    CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

    /*
     * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
     */
    val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
        LPU_LTSSM_CONFIG4_DATA_RATE) |
        (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
        LPU_LTSSM_CONFIG4_N_FTS));
    CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

    /*
     * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
     */
    val = 0ull;
    CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

    /*
     * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
     */

    /*
     * LTSSM Status registers are test only.
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_STATUS1));

    /*
     * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_STATUS2));

    /*
     * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - "
        "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
        CSR_XR(csr_base,
        LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

    /*
     * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
     */
    DBG(DBG_LPU, NULL,
        "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

    /*
     * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
     */
    DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
        CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}
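/*
 * dlu_init() below is the Oberon-side counterpart of the Fire-only
 * lpu_init() above: hvio_pec_init() picks one or the other by chip type,
 * and the DLU registers written here mirror the LPU link-layer setup
 * (VC0 enable, flow-control update enables, replay timer threshold).
 */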
/* ARGSUSED */
static void
dlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
        CSR_XR(csr_base, DLU_INTERRUPT_MASK));

    val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
    CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
        CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));

    val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
        (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);

    CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
    DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
        "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));

    val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
        DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);

    CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

    DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
        "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    /*
     * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE
     * Expect OBP 0x8000000000000003
     */
    val = -1ull;
    CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
    DBG(DBG_DMC, NULL,
        "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

    /*
     * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
     */
    DBG(DBG_DMC, NULL,
        "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
        CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

    /*
     * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
     */
    val = 0x0ull;
    CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
    DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
        CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

    /*
     * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
     */
    val = 0x0ull;
    CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
    DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
        CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val;

    ilu_init(csr_base, pxu_p);
    tlu_init(csr_base, pxu_p);

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        dlu_init(csr_base, pxu_p);
        break;
    case PX_CHIP_FIRE:
        lpu_init(csr_base, pxu_p);
        break;
    default:
        DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        break;
    }

    dmc_init(csr_base, pxu_p);

    /*
     * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE
     * Expect Kernel 0x800000000000000F
     */
    val = -1ull;
    CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
    DBG(DBG_PEC, NULL,
        "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

    /*
     * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
     */
    DBG(DBG_PEC, NULL,
        "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}
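/*
 * MMU helpers. A TTE here is the raw 64-bit entry stored in the TSB;
 * mmu_tte_to_pa() below masks out the chip-specific physical-address
 * field and shifts by MMU_PAGE_SHIFT, so the r_addr_t it returns is a
 * page frame number rather than a byte address.
 */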
/*
 * Convert a TTE to physical address
 */
static r_addr_t
mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
{
    uint64_t pa_mask;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        pa_mask = MMU_OBERON_PADDR_MASK;
        break;
    case PX_CHIP_FIRE:
        pa_mask = MMU_FIRE_PADDR_MASK;
        break;
    default:
        DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        pa_mask = 0;
        break;
    }
    return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
}

/*
 * Return MMU bypass noncache bit for chip
 */
static r_addr_t
mmu_bypass_noncache(pxu_t *pxu_p)
{
    r_addr_t bypass_noncache_bit;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
        break;
    case PX_CHIP_FIRE:
        bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
        break;
    default:
        DBG(DBG_MMU, NULL,
            "mmu_bypass_noncache - unknown chip type: 0x%x\n",
            PX_CHIP_TYPE(pxu_p));
        bypass_noncache_bit = 0;
        break;
    }
    return (bypass_noncache_bit);
}

/*
 * Calculate number of TSB entries for the chip.
 */
/* ARGSUSED */
static uint_t
mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t tsb_ctrl;
    uint_t obp_tsb_entries, obp_tsb_size;

    tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

    obp_tsb_size = tsb_ctrl & 0xF;

    obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

    return (obp_tsb_entries);
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
    uint64_t val, i, obp_tsb_pa, *base_tte_addr;
    uint_t obp_tsb_entries;

    bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

    /*
     * Preserve OBP's TSB
     */
    obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;

    obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);

    base_tte_addr = pxu_p->tsb_vaddr +
        ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

    for (i = 0; i < obp_tsb_entries; i++) {
        uint64_t tte = lddphys(obp_tsb_pa + i * 8);

        if (!MMU_TTE_VALID(tte))
            continue;

        base_tte_addr[i] = tte;
    }

    /*
     * Invalidate the TLB through the diagnostic register.
     */
    CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

    /*
     * Configure the Fire MMU TSB Control Register. Determine
     * the encoding for either 8KB pages (0) or 64KB pages (1).
     *
     * Write the most significant 30 bits of the TSB physical address
     * and the encoded TSB table size.
     */
    for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--) {}

    val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
        ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

    CSR_XS(csr_base, MMU_TSB_CONTROL, val);
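    /*
     * Worked example for the size-encoding loop above: the loop settles
     * on the largest i (at most 8) for which tsb_size >= 0x2000 << i,
     * so a 256KB TSB yields i = 5. In other words a size code of n
     * appears to denote an (8KB << n) TSB, i.e. 1024 << n eight-byte
     * TTEs, which is what mmu_tsb_entries() decodes back out of the low
     * four bits of MMU_TSB_CONTROL.
     */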
    /*
     * Enable the MMU, set the "TSB Cache Snoop Enable",
     * the "Cache Mode", the "Bypass Enable" and
     * the "Translation Enable" bits.
     */
    val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
    val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
        | (MMU_CONTROL_AND_STATUS_ROE_BIT63_ENABLE <<
        MMU_CONTROL_AND_STATUS_ROE)
        | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
        | (1ull << MMU_CONTROL_AND_STATUS_BE)
        | (1ull << MMU_CONTROL_AND_STATUS_TE));

    CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

    /*
     * Read the register here to ensure that the previous writes to
     * the Fire MMU registers have been flushed. (Technically, this
     * is not entirely necessary here as we will likely do later reads
     * during Fire initialization, but it is a small price to pay for
     * more modular code.)
     */
    (void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

    /*
     * CSR_V MMU's interrupt regs (log, enable, status, clear)
     */
    DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
        CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
        CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

    DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
        CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
{
    tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
    uint64_t attr = MMU_TTE_V;
    int i;

    if (io_attr & PCI_MAP_ATTR_WRITE)
        attr |= MMU_TTE_W;

    if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
        (io_attr & PCI_MAP_ATTR_RO))
        attr |= MMU_TTE_RO;

    if (attr & MMU_TTE_RO) {
        DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
            "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
    }

    if (flags & MMU_MAP_PFN) {
        ddi_dma_impl_t *mp = (ddi_dma_impl_t *)addr;
        for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
            px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
            pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;

            /*
             * Oberon will need to flush the corresponding TTEs in
             * the cache. We only need to flush every cache line.
             * Extra PIOs are expensive.
             */
            if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
                if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
                    CSR_XS(dev_hdl,
                        MMU_TTE_CACHE_FLUSH_ADDRESS,
                        (pxu_p->tsb_paddr+
                        (tsb_index*MMU_TTE_SIZE)));
                }
            }
        }
    } else {
        caddr_t a = (caddr_t)addr;
        for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
            px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
            pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;

            /*
             * Oberon will need to flush the corresponding TTEs in
             * the cache. We only need to flush every cache line.
             * Extra PIOs are expensive.
             */
            if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
                if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
                    CSR_XS(dev_hdl,
                        MMU_TTE_CACHE_FLUSH_ADDRESS,
                        (pxu_p->tsb_paddr+
                        (tsb_index*MMU_TTE_SIZE)));
                }
            }
        }
    }

    return (H_EOK);
}
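/*
 * A note on the flush bookkeeping in hvio_iommu_map() above and
 * hvio_iommu_demap() below: eight 8-byte TTEs fit in one 64-byte cache
 * line, so the flush address is only written when (tsb_index + 1) is a
 * multiple of 8 or on the final page. For example, a 20-page map whose
 * first TTE is line-aligned issues 3 flushes instead of 20.
 */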
1871			 */
1872			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1873				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1874					CSR_XS(dev_hdl,
1875					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1876					    (pxu_p->tsb_paddr+
1877					    (tsb_index*MMU_TTE_SIZE)));
1878				}
1879			}
1880		}
1881
1882		return (H_EOK);
1883	}
1884
1885	/* ARGSUSED */
1886	uint64_t
1887	hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1888	    io_attributes_t *attr_p, r_addr_t *r_addr_p)
1889	{
1890		tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1891		uint64_t *tte_addr;
1892		uint64_t ret = H_EOK;
1893
1894		tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1895
1896		if (*tte_addr & MMU_TTE_V) {
1897			*r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
1898			*attr_p = (*tte_addr & MMU_TTE_W) ?
1899			    PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1900		} else {
1901			*r_addr_p = 0;
1902			*attr_p = 0;
1903			ret = H_ENOMAP;
1904		}
1905
1906		return (ret);
1907	}
1908
1909	/* ARGSUSED */
1910	uint64_t
1911	hvio_get_bypass_base(pxu_t *pxu_p)
1912	{
1913		uint64_t base;
1914
1915		switch (PX_CHIP_TYPE(pxu_p)) {
1916		case PX_CHIP_OBERON:
1917			base = MMU_OBERON_BYPASS_BASE;
1918			break;
1919		case PX_CHIP_FIRE:
1920			base = MMU_FIRE_BYPASS_BASE;
1921			break;
1922		default:
1923			DBG(DBG_MMU, NULL,
1924			    "hvio_get_bypass_base - unknown chip type: 0x%x\n",
1925			    PX_CHIP_TYPE(pxu_p));
1926			base = 0;
1927			break;
1928		}
1929		return (base);
1930	}
1931
1932	/* ARGSUSED */
1933	uint64_t
1934	hvio_get_bypass_end(pxu_t *pxu_p)
1935	{
1936		uint64_t end;
1937
1938		switch (PX_CHIP_TYPE(pxu_p)) {
1939		case PX_CHIP_OBERON:
1940			end = MMU_OBERON_BYPASS_END;
1941			break;
1942		case PX_CHIP_FIRE:
1943			end = MMU_FIRE_BYPASS_END;
1944			break;
1945		default:
1946			DBG(DBG_MMU, NULL,
1947			    "hvio_get_bypass_end - unknown chip type: 0x%x\n",
1948			    PX_CHIP_TYPE(pxu_p));
1949			end = 0;
1950			break;
1951		}
1952		return (end);
1953	}
1954
1955	/* ARGSUSED */
1956	uint64_t
1957	hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
1958	    io_attributes_t attr, io_addr_t *io_addr_p)
1959	{
1960		uint64_t pfn = MMU_BTOP(ra);
1961
1962		*io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
1963		    (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
1964
1965		return (H_EOK);
1966	}
1967
1968	/*
1969	 * Generic IO Interrupt Services
1970	 */
1971
1972	/*
1973	 * Converts a device-specific interrupt number given by the
1974	 * arguments devhandle and devino into a system-specific ino.
1975	 */
1976	/* ARGSUSED */
1977	uint64_t
1978	hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
1979	    sysino_t *sysino)
1980	{
1981		if (devino > INTERRUPT_MAPPING_ENTRIES) {
1982			DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
1983			return (H_ENOINTR);
1984		}
1985
1986		*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
1987
1988		return (H_EOK);
1989	}
1990
1991	/*
1992	 * Returns, in intr_valid_state, whether the interrupt defined by
1993	 * sysino is valid (enabled) or not-valid (disabled).
1994	 */
1995	uint64_t
1996	hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1997	    intr_valid_state_t *intr_valid_state)
1998	{
1999		if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2000		    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
2001			*intr_valid_state = INTR_VALID;
2002		} else {
2003			*intr_valid_state = INTR_NOTVALID;
2004		}
2005
2006		return (H_EOK);
2007	}
2008
2009	/*
2010	 * Sets the 'valid' state of the interrupt defined by
2011	 * the argument sysino to the state defined by the
2012	 * argument intr_valid_state.
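	 * (The valid bit is the V field of the per-ino interrupt mapping
	 * register; it is set or cleared below via CSRA_BS/CSRA_BC.)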
2013 */ 2014 uint64_t 2015 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino, 2016 intr_valid_state_t intr_valid_state) 2017 { 2018 switch (intr_valid_state) { 2019 case INTR_VALID: 2020 CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING, 2021 SYSINO_TO_DEVINO(sysino), ENTRIES_V); 2022 break; 2023 case INTR_NOTVALID: 2024 CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING, 2025 SYSINO_TO_DEVINO(sysino), ENTRIES_V); 2026 break; 2027 default: 2028 return (EINVAL); 2029 } 2030 2031 return (H_EOK); 2032 } 2033 2034 /* 2035 * Returns the current state of the interrupt given by the sysino 2036 * argument. 2037 */ 2038 uint64_t 2039 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino, 2040 intr_state_t *intr_state) 2041 { 2042 intr_state_t state; 2043 2044 state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR, 2045 SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE); 2046 2047 switch (state) { 2048 case INTERRUPT_IDLE_STATE: 2049 *intr_state = INTR_IDLE_STATE; 2050 break; 2051 case INTERRUPT_RECEIVED_STATE: 2052 *intr_state = INTR_RECEIVED_STATE; 2053 break; 2054 case INTERRUPT_PENDING_STATE: 2055 *intr_state = INTR_DELIVERED_STATE; 2056 break; 2057 default: 2058 return (EINVAL); 2059 } 2060 2061 return (H_EOK); 2062 2063 } 2064 2065 /* 2066 * Sets the current state of the interrupt given by the sysino 2067 * argument to the value given in the argument intr_state. 2068 * 2069 * Note: Setting the state to INTR_IDLE clears any pending 2070 * interrupt for sysino. 2071 */ 2072 uint64_t 2073 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino, 2074 intr_state_t intr_state) 2075 { 2076 intr_state_t state; 2077 2078 switch (intr_state) { 2079 case INTR_IDLE_STATE: 2080 state = INTERRUPT_IDLE_STATE; 2081 break; 2082 case INTR_DELIVERED_STATE: 2083 state = INTERRUPT_PENDING_STATE; 2084 break; 2085 default: 2086 return (EINVAL); 2087 } 2088 2089 CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, 2090 SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state); 2091 2092 return (H_EOK); 2093 } 2094 2095 /* 2096 * Returns the cpuid that is the current target of the 2097 * interrupt given by the sysino argument. 2098 * 2099 * The cpuid value returned is undefined if the target 2100 * has not been set via intr_settarget. 2101 */ 2102 uint64_t 2103 hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino, 2104 cpuid_t *cpuid) 2105 { 2106 switch (PX_CHIP_TYPE(pxu_p)) { 2107 case PX_CHIP_OBERON: 2108 *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING, 2109 SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID); 2110 break; 2111 case PX_CHIP_FIRE: 2112 *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING, 2113 SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID); 2114 break; 2115 default: 2116 DBG(DBG_CB, NULL, "hvio_intr_gettarget - " 2117 "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p)); 2118 return (EINVAL); 2119 } 2120 2121 return (H_EOK); 2122 } 2123 2124 /* 2125 * Set the target cpu for the interrupt defined by the argument 2126 * sysino to the target cpu value defined by the argument cpuid. 2127 */ 2128 uint64_t 2129 hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino, 2130 cpuid_t cpuid) 2131 { 2132 2133 uint64_t val, intr_controller; 2134 uint32_t ino = SYSINO_TO_DEVINO(sysino); 2135 2136 /* 2137 * For now, we assign interrupt controller in a round 2138 * robin fashion. Later, we may need to come up with 2139 * a more efficient assignment algorithm. 
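	 * For example, with the four controllers assumed here, cpuid 5
	 * selects controller 1 (5 % 4 == 1), i.e. intr_controller = 0x2.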
2140 */ 2141 intr_controller = 0x1ull << (cpuid % 4); 2142 2143 switch (PX_CHIP_TYPE(pxu_p)) { 2144 case PX_CHIP_OBERON: 2145 val = (((cpuid & 2146 INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) << 2147 INTERRUPT_MAPPING_ENTRIES_T_DESTID) | 2148 ((intr_controller & 2149 INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK) 2150 << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM)); 2151 break; 2152 case PX_CHIP_FIRE: 2153 val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) << 2154 INTERRUPT_MAPPING_ENTRIES_T_JPID) | 2155 ((intr_controller & 2156 INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK) 2157 << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM)); 2158 break; 2159 default: 2160 DBG(DBG_CB, NULL, "hvio_intr_settarget - " 2161 "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p)); 2162 return (EINVAL); 2163 } 2164 2165 /* For EQ interrupts, set DATA MONDO bit */ 2166 if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) && 2167 (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT))) 2168 val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE); 2169 2170 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val); 2171 2172 return (H_EOK); 2173 } 2174 2175 /* 2176 * MSIQ Functions: 2177 */ 2178 uint64_t 2179 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p) 2180 { 2181 CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0, 2182 (uint64_t)pxu_p->msiq_mapped_p); 2183 DBG(DBG_IB, NULL, 2184 "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n", 2185 CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS)); 2186 2187 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0, 2188 (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p), 2189 pxu_p->portid) << INO_BITS); 2190 DBG(DBG_IB, NULL, "hvio_msiq_init: " 2191 "INTERRUPT_MONDO_DATA_0: 0x%llx\n", 2192 CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0)); 2193 2194 return (H_EOK); 2195 } 2196 2197 uint64_t 2198 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id, 2199 pci_msiq_valid_state_t *msiq_valid_state) 2200 { 2201 uint32_t eq_state; 2202 uint64_t ret = H_EOK; 2203 2204 eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2205 msiq_id, ENTRIES_STATE); 2206 2207 switch (eq_state) { 2208 case EQ_IDLE_STATE: 2209 *msiq_valid_state = PCI_MSIQ_INVALID; 2210 break; 2211 case EQ_ACTIVE_STATE: 2212 case EQ_ERROR_STATE: 2213 *msiq_valid_state = PCI_MSIQ_VALID; 2214 break; 2215 default: 2216 ret = H_EIO; 2217 break; 2218 } 2219 2220 return (ret); 2221 } 2222 2223 uint64_t 2224 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id, 2225 pci_msiq_valid_state_t msiq_valid_state) 2226 { 2227 uint64_t ret = H_EOK; 2228 2229 switch (msiq_valid_state) { 2230 case PCI_MSIQ_INVALID: 2231 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR, 2232 msiq_id, ENTRIES_DIS); 2233 break; 2234 case PCI_MSIQ_VALID: 2235 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2236 msiq_id, ENTRIES_EN); 2237 break; 2238 default: 2239 ret = H_EINVAL; 2240 break; 2241 } 2242 2243 return (ret); 2244 } 2245 2246 uint64_t 2247 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id, 2248 pci_msiq_state_t *msiq_state) 2249 { 2250 uint32_t eq_state; 2251 uint64_t ret = H_EOK; 2252 2253 eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2254 msiq_id, ENTRIES_STATE); 2255 2256 switch (eq_state) { 2257 case EQ_IDLE_STATE: 2258 case EQ_ACTIVE_STATE: 2259 *msiq_state = PCI_MSIQ_STATE_IDLE; 2260 break; 2261 case EQ_ERROR_STATE: 2262 *msiq_state = PCI_MSIQ_STATE_ERROR; 2263 break; 2264 default: 2265 ret = H_EIO; 2266 } 2267 2268 return (ret); 2269 } 2270 2271 uint64_t 2272 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id, 2273 
pci_msiq_state_t msiq_state) 2274 { 2275 uint32_t eq_state; 2276 uint64_t ret = H_EOK; 2277 2278 eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, 2279 msiq_id, ENTRIES_STATE); 2280 2281 switch (eq_state) { 2282 case EQ_IDLE_STATE: 2283 if (msiq_state == PCI_MSIQ_STATE_ERROR) 2284 ret = H_EIO; 2285 break; 2286 case EQ_ACTIVE_STATE: 2287 if (msiq_state == PCI_MSIQ_STATE_ERROR) 2288 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2289 msiq_id, ENTRIES_ENOVERR); 2290 else 2291 ret = H_EIO; 2292 break; 2293 case EQ_ERROR_STATE: 2294 if (msiq_state == PCI_MSIQ_STATE_IDLE) 2295 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR, 2296 msiq_id, ENTRIES_E2I); 2297 else 2298 ret = H_EIO; 2299 break; 2300 default: 2301 ret = H_EIO; 2302 } 2303 2304 return (ret); 2305 } 2306 2307 uint64_t 2308 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id, 2309 msiqhead_t *msiq_head) 2310 { 2311 *msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, 2312 msiq_id, ENTRIES_HEAD); 2313 2314 return (H_EOK); 2315 } 2316 2317 uint64_t 2318 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id, 2319 msiqhead_t msiq_head) 2320 { 2321 CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id, 2322 ENTRIES_HEAD, msiq_head); 2323 2324 return (H_EOK); 2325 } 2326 2327 uint64_t 2328 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id, 2329 msiqtail_t *msiq_tail) 2330 { 2331 *msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL, 2332 msiq_id, ENTRIES_TAIL); 2333 2334 return (H_EOK); 2335 } 2336 2337 /* 2338 * MSI Functions: 2339 */ 2340 uint64_t 2341 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64) 2342 { 2343 /* PCI MEM 32 resources to perform 32 bit MSI transactions */ 2344 CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0, 2345 ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR); 2346 DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n", 2347 CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS)); 2348 2349 /* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */ 2350 CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0, 2351 ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR); 2352 DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n", 2353 CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS)); 2354 2355 return (H_EOK); 2356 } 2357 2358 uint64_t 2359 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num, 2360 msiqid_t *msiq_id) 2361 { 2362 *msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING, 2363 msi_num, ENTRIES_EQNUM); 2364 2365 return (H_EOK); 2366 } 2367 2368 uint64_t 2369 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num, 2370 msiqid_t msiq_id) 2371 { 2372 CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2373 ENTRIES_EQNUM, msiq_id); 2374 2375 return (H_EOK); 2376 } 2377 2378 uint64_t 2379 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num, 2380 pci_msi_valid_state_t *msi_valid_state) 2381 { 2382 *msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING, 2383 msi_num, ENTRIES_V); 2384 2385 return (H_EOK); 2386 } 2387 2388 uint64_t 2389 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num, 2390 pci_msi_valid_state_t msi_valid_state) 2391 { 2392 uint64_t ret = H_EOK; 2393 2394 switch (msi_valid_state) { 2395 case PCI_MSI_VALID: 2396 CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2397 ENTRIES_V); 2398 break; 2399 case PCI_MSI_INVALID: 2400 CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num, 2401 ENTRIES_V); 2402 break; 2403 default: 2404 ret = H_EINVAL; 2405 } 2406 2407 return (ret); 2408 } 2409 2410 uint64_t 2411 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t 
msi_num, 2412 pci_msi_state_t *msi_state) 2413 { 2414 *msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING, 2415 msi_num, ENTRIES_EQWR_N); 2416 2417 return (H_EOK); 2418 } 2419 2420 uint64_t 2421 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num, 2422 pci_msi_state_t msi_state) 2423 { 2424 uint64_t ret = H_EOK; 2425 2426 switch (msi_state) { 2427 case PCI_MSI_STATE_IDLE: 2428 CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num, 2429 ENTRIES_EQWR_N); 2430 break; 2431 case PCI_MSI_STATE_DELIVERED: 2432 default: 2433 ret = H_EINVAL; 2434 break; 2435 } 2436 2437 return (ret); 2438 } 2439 2440 /* 2441 * MSG Functions: 2442 */ 2443 uint64_t 2444 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2445 msiqid_t *msiq_id) 2446 { 2447 uint64_t ret = H_EOK; 2448 2449 switch (msg_type) { 2450 case PCIE_PME_MSG: 2451 *msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM); 2452 break; 2453 case PCIE_PME_ACK_MSG: 2454 *msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, 2455 EQNUM); 2456 break; 2457 case PCIE_CORR_MSG: 2458 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM); 2459 break; 2460 case PCIE_NONFATAL_MSG: 2461 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, 2462 EQNUM); 2463 break; 2464 case PCIE_FATAL_MSG: 2465 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM); 2466 break; 2467 default: 2468 ret = H_EINVAL; 2469 break; 2470 } 2471 2472 return (ret); 2473 } 2474 2475 uint64_t 2476 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2477 msiqid_t msiq_id) 2478 { 2479 uint64_t ret = H_EOK; 2480 2481 switch (msg_type) { 2482 case PCIE_PME_MSG: 2483 CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id); 2484 break; 2485 case PCIE_PME_ACK_MSG: 2486 CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id); 2487 break; 2488 case PCIE_CORR_MSG: 2489 CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id); 2490 break; 2491 case PCIE_NONFATAL_MSG: 2492 CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id); 2493 break; 2494 case PCIE_FATAL_MSG: 2495 CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id); 2496 break; 2497 default: 2498 ret = H_EINVAL; 2499 break; 2500 } 2501 2502 return (ret); 2503 } 2504 2505 uint64_t 2506 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2507 pcie_msg_valid_state_t *msg_valid_state) 2508 { 2509 uint64_t ret = H_EOK; 2510 2511 switch (msg_type) { 2512 case PCIE_PME_MSG: 2513 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2514 break; 2515 case PCIE_PME_ACK_MSG: 2516 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, 2517 PME_TO_ACK_MAPPING, V); 2518 break; 2519 case PCIE_CORR_MSG: 2520 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V); 2521 break; 2522 case PCIE_NONFATAL_MSG: 2523 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, 2524 ERR_NONFATAL_MAPPING, V); 2525 break; 2526 case PCIE_FATAL_MSG: 2527 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, 2528 V); 2529 break; 2530 default: 2531 ret = H_EINVAL; 2532 break; 2533 } 2534 2535 return (ret); 2536 } 2537 2538 uint64_t 2539 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type, 2540 pcie_msg_valid_state_t msg_valid_state) 2541 { 2542 uint64_t ret = H_EOK; 2543 2544 switch (msg_valid_state) { 2545 case PCIE_MSG_VALID: 2546 switch (msg_type) { 2547 case PCIE_PME_MSG: 2548 CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2549 break; 2550 case PCIE_PME_ACK_MSG: 2551 CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V); 2552 break; 2553 case PCIE_CORR_MSG: 2554 CSR_BS((caddr_t)dev_hdl, 
ERR_COR_MAPPING, V); 2555 break; 2556 case PCIE_NONFATAL_MSG: 2557 CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V); 2558 break; 2559 case PCIE_FATAL_MSG: 2560 CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V); 2561 break; 2562 default: 2563 ret = H_EINVAL; 2564 break; 2565 } 2566 2567 break; 2568 case PCIE_MSG_INVALID: 2569 switch (msg_type) { 2570 case PCIE_PME_MSG: 2571 CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V); 2572 break; 2573 case PCIE_PME_ACK_MSG: 2574 CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V); 2575 break; 2576 case PCIE_CORR_MSG: 2577 CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V); 2578 break; 2579 case PCIE_NONFATAL_MSG: 2580 CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V); 2581 break; 2582 case PCIE_FATAL_MSG: 2583 CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V); 2584 break; 2585 default: 2586 ret = H_EINVAL; 2587 break; 2588 } 2589 break; 2590 default: 2591 ret = H_EINVAL; 2592 } 2593 2594 return (ret); 2595 } 2596 2597 /* 2598 * Suspend/Resume Functions: 2599 * (pec, mmu, ib) 2600 * cb 2601 * Registers saved have all been touched in the XXX_init functions. 2602 */ 2603 uint64_t 2604 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2605 { 2606 uint64_t *config_state; 2607 int total_size; 2608 int i; 2609 2610 if (msiq_suspend(dev_hdl, pxu_p) != H_EOK) 2611 return (H_EIO); 2612 2613 total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE; 2614 config_state = kmem_zalloc(total_size, KM_NOSLEEP); 2615 2616 if (config_state == NULL) { 2617 return (H_EIO); 2618 } 2619 2620 /* 2621 * Soft state for suspend/resume from pxu_t 2622 * uint64_t *pec_config_state; 2623 * uint64_t *mmu_config_state; 2624 * uint64_t *ib_intr_map; 2625 * uint64_t *ib_config_state; 2626 * uint64_t *xcb_config_state; 2627 */ 2628 2629 /* Save the PEC configuration states */ 2630 pxu_p->pec_config_state = config_state; 2631 for (i = 0; i < PEC_KEYS; i++) { 2632 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) || 2633 (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) { 2634 pxu_p->pec_config_state[i] = 2635 CSR_XR((caddr_t)dev_hdl, 2636 pec_config_state_regs[i].reg); 2637 } 2638 } 2639 2640 /* Save the MMU configuration states */ 2641 pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS; 2642 for (i = 0; i < MMU_KEYS; i++) { 2643 pxu_p->mmu_config_state[i] = 2644 CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]); 2645 } 2646 2647 /* Save the interrupt mapping registers */ 2648 pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS; 2649 for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) { 2650 pxu_p->ib_intr_map[i] = 2651 CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i); 2652 } 2653 2654 /* Save the IB configuration states */ 2655 pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES; 2656 for (i = 0; i < IB_KEYS; i++) { 2657 pxu_p->ib_config_state[i] = 2658 CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]); 2659 } 2660 2661 return (H_EOK); 2662 } 2663 2664 void 2665 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p) 2666 { 2667 int total_size; 2668 sysino_t sysino; 2669 int i; 2670 uint64_t ret; 2671 2672 /* Make sure that suspend actually did occur */ 2673 if (!pxu_p->pec_config_state) { 2674 return; 2675 } 2676 2677 /* Restore IB configuration states */ 2678 for (i = 0; i < IB_KEYS; i++) { 2679 CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i], 2680 pxu_p->ib_config_state[i]); 2681 } 2682 2683 /* 2684 * Restore the interrupt mapping registers 2685 * And make sure the intrs are idle. 
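	 * (Each ino is forced to the idle state just before its mapping
	 * register is rewritten, so no stale interrupt state survives
	 * the resume.)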
2686 */ 2687 for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) { 2688 CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i, 2689 ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE); 2690 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i, 2691 pxu_p->ib_intr_map[i]); 2692 } 2693 2694 /* Restore MMU configuration states */ 2695 /* Clear the cache. */ 2696 CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull); 2697 2698 for (i = 0; i < MMU_KEYS; i++) { 2699 CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i], 2700 pxu_p->mmu_config_state[i]); 2701 } 2702 2703 /* Restore PEC configuration states */ 2704 /* Make sure all reset bits are low until error is detected */ 2705 CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull); 2706 2707 for (i = 0; i < PEC_KEYS; i++) { 2708 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) || 2709 (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) { 2710 CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg, 2711 pxu_p->pec_config_state[i]); 2712 } 2713 } 2714 2715 /* Enable PCI-E interrupt */ 2716 if ((ret = hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, 2717 &sysino)) != H_EOK) { 2718 cmn_err(CE_WARN, 2719 "hvio_resume: hvio_intr_devino_to_sysino failed, " 2720 "ret 0x%lx", ret); 2721 } 2722 2723 if ((ret = hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE)) 2724 != H_EOK) { 2725 cmn_err(CE_WARN, 2726 "hvio_resume: hvio_intr_setstate failed, " 2727 "ret 0x%lx", ret); 2728 } 2729 2730 total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE; 2731 kmem_free(pxu_p->pec_config_state, total_size); 2732 2733 pxu_p->pec_config_state = NULL; 2734 pxu_p->mmu_config_state = NULL; 2735 pxu_p->ib_config_state = NULL; 2736 pxu_p->ib_intr_map = NULL; 2737 2738 msiq_resume(dev_hdl, pxu_p); 2739 } 2740 2741 uint64_t 2742 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2743 { 2744 uint64_t *config_state, *cb_regs; 2745 int i, cb_size, cb_keys; 2746 2747 switch (PX_CHIP_TYPE(pxu_p)) { 2748 case PX_CHIP_OBERON: 2749 cb_size = UBC_SIZE; 2750 cb_keys = UBC_KEYS; 2751 cb_regs = ubc_config_state_regs; 2752 break; 2753 case PX_CHIP_FIRE: 2754 cb_size = JBC_SIZE; 2755 cb_keys = JBC_KEYS; 2756 cb_regs = jbc_config_state_regs; 2757 break; 2758 default: 2759 DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n", 2760 PX_CHIP_TYPE(pxu_p)); 2761 break; 2762 } 2763 2764 config_state = kmem_zalloc(cb_size, KM_NOSLEEP); 2765 2766 if (config_state == NULL) { 2767 return (H_EIO); 2768 } 2769 2770 /* Save the configuration states */ 2771 pxu_p->xcb_config_state = config_state; 2772 for (i = 0; i < cb_keys; i++) { 2773 pxu_p->xcb_config_state[i] = 2774 CSR_XR((caddr_t)dev_hdl, cb_regs[i]); 2775 } 2776 2777 return (H_EOK); 2778 } 2779 2780 void 2781 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl, 2782 devino_t devino, pxu_t *pxu_p) 2783 { 2784 sysino_t sysino; 2785 uint64_t *cb_regs; 2786 int i, cb_size, cb_keys; 2787 uint64_t ret; 2788 2789 switch (PX_CHIP_TYPE(pxu_p)) { 2790 case PX_CHIP_OBERON: 2791 cb_size = UBC_SIZE; 2792 cb_keys = UBC_KEYS; 2793 cb_regs = ubc_config_state_regs; 2794 /* 2795 * No reason to have any reset bits high until an error is 2796 * detected on the link. 2797 */ 2798 CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull); 2799 break; 2800 case PX_CHIP_FIRE: 2801 cb_size = JBC_SIZE; 2802 cb_keys = JBC_KEYS; 2803 cb_regs = jbc_config_state_regs; 2804 /* 2805 * No reason to have any reset bits high until an error is 2806 * detected on the link. 
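		 * (The -1ull write below clears every latched bit via
		 * JBC_ERROR_STATUS_CLEAR, mirroring the
		 * UBC_ERROR_STATUS_CLEAR write in the Oberon case above.)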
2807 */ 2808 CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull); 2809 break; 2810 default: 2811 DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n", 2812 PX_CHIP_TYPE(pxu_p)); 2813 break; 2814 } 2815 2816 ASSERT(pxu_p->xcb_config_state); 2817 2818 /* Restore the configuration states */ 2819 for (i = 0; i < cb_keys; i++) { 2820 CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i], 2821 pxu_p->xcb_config_state[i]); 2822 } 2823 2824 /* Enable XBC interrupt */ 2825 if ((ret = hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, 2826 &sysino)) != H_EOK) { 2827 cmn_err(CE_WARN, 2828 "hvio_cb_resume: hvio_intr_devino_to_sysino failed, " 2829 "ret 0x%lx", ret); 2830 } 2831 2832 if ((ret = hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE)) 2833 != H_EOK) { 2834 cmn_err(CE_WARN, 2835 "hvio_cb_resume: hvio_intr_setstate failed, " 2836 "ret 0x%lx", ret); 2837 } 2838 2839 kmem_free(pxu_p->xcb_config_state, cb_size); 2840 2841 pxu_p->xcb_config_state = NULL; 2842 } 2843 2844 static uint64_t 2845 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p) 2846 { 2847 size_t bufsz; 2848 volatile uint64_t *cur_p; 2849 int i; 2850 2851 bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE; 2852 if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) == 2853 NULL) 2854 return (H_EIO); 2855 2856 cur_p = pxu_p->msiq_config_state; 2857 2858 /* Save each EQ state */ 2859 for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) 2860 *cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i); 2861 2862 /* Save MSI mapping registers */ 2863 for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++) 2864 *cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i); 2865 2866 /* Save all other MSIQ registers */ 2867 for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++) 2868 *cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]); 2869 return (H_EOK); 2870 } 2871 2872 static void 2873 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p) 2874 { 2875 size_t bufsz; 2876 uint64_t *cur_p, state; 2877 int i; 2878 uint64_t ret; 2879 2880 bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE; 2881 cur_p = pxu_p->msiq_config_state; 2882 /* 2883 * Initialize EQ base address register and 2884 * Interrupt Mondo Data 0 register. 2885 */ 2886 if ((ret = hvio_msiq_init(dev_hdl, pxu_p)) != H_EOK) { 2887 cmn_err(CE_WARN, 2888 "msiq_resume: hvio_msiq_init failed, " 2889 "ret 0x%lx", ret); 2890 } 2891 2892 /* Restore EQ states */ 2893 for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) { 2894 state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK; 2895 if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE)) 2896 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET, 2897 i, ENTRIES_EN); 2898 } 2899 2900 /* Restore MSI mapping */ 2901 for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++) 2902 CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p); 2903 2904 /* 2905 * Restore all other registers. MSI 32 bit address and 2906 * MSI 64 bit address are restored as part of this. 2907 */ 2908 for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++) 2909 CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p); 2910 2911 kmem_free(pxu_p->msiq_config_state, bufsz); 2912 pxu_p->msiq_config_state = NULL; 2913 } 2914 2915 /* 2916 * sends PME_Turn_Off message to put the link in L2/L3 ready state. 2917 * called by px_goto_l23ready. 
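 * If a PME_Turn_Off is already pending, no new message is generated
 * and the call fails.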
2918	 * returns DDI_SUCCESS or DDI_FAILURE
2919	 */
2920	int
2921	px_send_pme_turnoff(caddr_t csr_base)
2922	{
2923		volatile uint64_t reg;
2924
2925		reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2926		/* If already pending, return failure */
2927		if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2928			DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2929			    "tlu_pme_turn_off_generate = %x\n", reg);
2930			return (DDI_FAILURE);
2931		}
2932
2933		/* Write to the PME_Turn_off register to broadcast */
2934		reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2935		CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2936
2937		return (DDI_SUCCESS);
2938	}
2939
2940	/*
2941	 * Checks whether the link is in the L1 idle state.
2942	 * Returns
2943	 * DDI_SUCCESS - if the link is in L1idle
2944	 * DDI_FAILURE - if the link is not in L1idle
2945	 */
2946	int
2947	px_link_wait4l1idle(caddr_t csr_base)
2948	{
2949		uint8_t ltssm_state;
2950		int ntries = px_max_l1_tries;
2951
2952		while (ntries > 0) {
2953			ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2954			if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2955				break;
2956			delay(1);
2957		}
2958		DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2959		return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2960	}
2961
2962	/*
2963	 * Transition the link to L0, after it is down.
2964	 */
2965	int
2966	px_link_retrain(caddr_t csr_base)
2967	{
2968		volatile uint64_t reg;
2969
2970		reg = CSR_XR(csr_base, TLU_CONTROL);
2971		if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2972			DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2973			return (DDI_FAILURE);
2974		}
2975
2976		/* Clear link down bit in TLU Other Event Clear Status Register. */
2977		CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2978
2979		/* Clear Drain bit in TLU Status Register */
2980		CSR_BS(csr_base, TLU_STATUS, DRAIN);
2981
2982		/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2983		reg = CSR_XR(csr_base, TLU_CONTROL);
2984		reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2985		CSR_XS(csr_base, TLU_CONTROL, reg);
2986
2987		return (DDI_SUCCESS);
2988	}
2989
2990	void
2991	px_enable_detect_quiet(caddr_t csr_base)
2992	{
2993		volatile uint64_t tlu_ctrl;
2994
2995		tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2996		tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2997		CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2998	}
2999
3000	static uint_t
3001	oberon_hp_pwron(caddr_t csr_base)
3002	{
3003		volatile uint64_t reg;
3004		boolean_t link_retry, link_up;
3005		int loop, i;
3006
3007		DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n");
3008
3009		/* Check Leaf Reset status */
3010		reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
3011		if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
3012			DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
3013			goto fail;
3014		}
3015
3016		/* Check HP Capable */
3017		if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
3018			DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
3019			    "hot-pluggable\n");
3020			goto fail;
3021		}
3022
3023		/* Check Slot status */
3024		reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
3025		if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
3026		    (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
3027			DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
3028			    reg);
3029			goto fail;
3030		}
3031
3032		/* Blink power LED, this is done from pciehpc already */
3033
3034		/* Turn on slot power */
3035		CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3036
3037		/* power fault detection */
3038		delay(drv_usectohz(25000));
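	/*
	 * Power-fault check: clear the latched PWFD status, drop slot
	 * power, and re-test PWFD below; if the status does not latch
	 * again while power is off, the slot is treated as having a
	 * power fault.
	 */
3039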
CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3040 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN); 3041 3042 /* wait to check power state */ 3043 delay(drv_usectohz(25000)); 3044 3045 if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) { 3046 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n"); 3047 goto fail1; 3048 } 3049 3050 /* power is good */ 3051 CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN); 3052 3053 delay(drv_usectohz(25000)); 3054 CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD); 3055 CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN); 3056 3057 /* Turn on slot clock */ 3058 CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN); 3059 3060 link_up = B_FALSE; 3061 link_retry = B_FALSE; 3062 3063 for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE); 3064 loop++) { 3065 if (link_retry == B_TRUE) { 3066 DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop " 3067 "%d\n", loop); 3068 CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS); 3069 CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1); 3070 delay(drv_usectohz(10000)); 3071 CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS); 3072 CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS); 3073 CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST); 3074 delay(drv_usectohz(50000)); 3075 } 3076 3077 /* Release PCI-E Reset */ 3078 delay(drv_usectohz(wait_perst)); 3079 CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST); 3080 3081 /* 3082 * Open events' mask 3083 * This should be done from pciehpc already 3084 */ 3085 3086 /* Enable PCIE port */ 3087 delay(drv_usectohz(wait_enable_port)); 3088 CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS); 3089 CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20); 3090 3091 /* wait for the link up */ 3092 /* BEGIN CSTYLED */ 3093 for (i = 0; (i < 2) && (link_up == B_FALSE); i++) { 3094 delay(drv_usectohz(link_status_check)); 3095 reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS); 3096 3097 if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) & 3098 DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) == 3099 DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) && 3100 (reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) && 3101 ((reg & DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK) 3102 == 3103 DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) { 3104 DBG(DBG_HP, NULL, "oberon_hp_pwron : link is up\n"); 3105 link_up = B_TRUE; 3106 } else 3107 link_retry = B_TRUE; 3108 } 3109 /* END CSTYLED */ 3110 } 3111 3112 if (link_up == B_FALSE) { 3113 DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable " 3114 "PCI-E port\n"); 3115 goto fail2; 3116 } 3117 3118 /* link is up */ 3119 CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS); 3120 CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR); 3121 CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P); 3122 CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S); 3123 CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS); 3124 3125 /* Restore LUP/LDN */ 3126 reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE); 3127 if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P)) 3128 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P; 3129 if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P)) 3130 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P; 3131 if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S)) 3132 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S; 3133 if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S)) 3134 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S; 3135 CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg); 3136 3137 /* 3138 * Initialize Leaf 3139 * SPLS = 00b, SPLV = 11001b, i.e. 
25W
3140	 */
3141	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3142	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
3143	    TLU_SLOT_CAPABILITIES_SPLS);
3144	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
3145	    TLU_SLOT_CAPABILITIES_SPLV);
3146	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
3147	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);
3148
3149	/* Turn on Power LED */
3150	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3151	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3152	reg = pcie_slotctl_pwr_indicator_set(reg,
3153	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
3154	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3155
3156	/* Notify SCF */
3157	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3158		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3159	else
3160		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3161
3162	/* Wait for one second */
3163	delay(drv_usectohz(1000000));
3164
3165	return (DDI_SUCCESS);
3166
3167	fail2:
3168	/* Link-up failed */
3169	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3170	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3171	delay(drv_usectohz(150));
3172
3173	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3174	delay(drv_usectohz(100));
3175
3176	fail1:
3177	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3178
3179	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3180
3181	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3182	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3183	reg = pcie_slotctl_pwr_indicator_set(reg,
3184	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3185	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3186
3187	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);
3188
3189	fail:
3190	return ((uint_t)DDI_FAILURE);
3191	}
3192
3193	hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;	/* 120 seconds */
3194
3195	static uint_t
3196	oberon_hp_pwroff(caddr_t csr_base)
3197	{
3198		volatile uint64_t reg;
3199		volatile uint64_t reg_tluue, reg_tluce;
3200		hrtime_t start_time, end_time;
3201
3202		DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n");
3203
3204		/* Blink power LED, this is done from pciehpc already */
3205
3206		/* Clear Slot Event */
3207		CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
3208		CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3209
3210		/* DRN_TR_DIS on */
3211		CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3212		delay(drv_usectohz(10000));
3213
3214		/* Disable LUP/LDN */
3215		reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3216		reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3217		    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3218		    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3219		    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3220		CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3221
3222		/* Save the TLU registers */
3223		reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
3224		reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
3225		/* All clear */
3226		CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
3227		CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);
3228
3229		/* Disable port */
3230		CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3231
3232		/* PCIE reset */
3233		delay(drv_usectohz(10000));
3234		CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3235
3236		/* PCIE clock stop */
3237		delay(drv_usectohz(150));
3238		CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3239
3240		/* Turn off slot power */
3241		delay(drv_usectohz(100));
3242		CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3243		CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3244		delay(drv_usectohz(25000));
3245		CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3246
3247		/* Write 0 to bit 7 of the ILU Error Log Enable Register */
3248		CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
3249
3250		/* Restore the TLU registers */
3251		CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
3252		CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);
3253
3254		/* Power LED off */
3255		reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3256		reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3257		reg = pcie_slotctl_pwr_indicator_set(reg,
3258		    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3259		CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3260
3261		/* Blink the attention indicator LED */
3262		reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3263		reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3264		reg = pcie_slotctl_attn_indicator_set(reg,
3265		    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
3266		CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3267
3268		/* Notify SCF */
3269		if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3270			CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3271		else
3272			CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3273
3274		start_time = gethrtime();
3275		/* Check Leaf Reset status */
3276		while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
3277			if ((end_time = (gethrtime() - start_time)) >
3278			    oberon_leaf_reset_timeout) {
3279				cmn_err(CE_WARN, "Oberon leaf reset did not complete, "
3280				    "even after waiting %llx ns", end_time);
3281
3282				break;
3283			}
3284
3285			/* Wait for one second */
3286			delay(drv_usectohz(1000000));
3287		}
3288
3289		/* Indicator LED off */
3290		reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3291		reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3292		reg = pcie_slotctl_attn_indicator_set(reg,
3293		    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3294		CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3295
3296		return (DDI_SUCCESS);
3297	}
3298
3299	static uint_t
3300	oberon_hpreg_get(void *cookie, off_t off)
3301	{
3302		caddr_t csr_base = *(caddr_t *)cookie;
3303		volatile uint64_t val = -1ull;
3304
3305		switch (off) {
3306		case PCIE_SLOTCAP:
3307			val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3308			break;
3309		case PCIE_SLOTCTL:
3310			val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3311
3312			/* Get the power state */
3313			val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
3314			    (1ull << HOTPLUG_CONTROL_PWREN)) ?
3315			    0 : PCIE_SLOTCTL_PWR_CONTROL;
3316			break;
3317		case PCIE_SLOTSTS:
3318			val = CSR_XR(csr_base, TLU_SLOT_STATUS);
3319			break;
3320		case PCIE_LINKCAP:
3321			val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
3322			break;
3323		case PCIE_LINKSTS:
3324			val = CSR_XR(csr_base, TLU_LINK_STATUS);
3325			break;
3326		default:
3327			DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
3328			    "unsupported offset 0x%lx\n", off);
3329			break;
3330		}
3331
3332		return ((uint_t)val);
3333	}
3334
3335	static uint_t
3336	oberon_hpreg_put(void *cookie, off_t off, uint_t val)
3337	{
3338		caddr_t csr_base = *(caddr_t *)cookie;
3339		volatile uint64_t pwr_state_on, pwr_fault;
3340		uint_t pwr_off, ret = DDI_SUCCESS;
3341
3342		DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
3343		    off, oberon_hpreg_get(cookie, off), val);
3344
3345		switch (off) {
3346		case PCIE_SLOTCTL:
3347			/*
3348			 * Depending on the current state, insertion or removal
3349			 * will go through their respective sequences.
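			 * A power-on request while power is off runs
			 * oberon_hp_pwron(); a power-off request while power
			 * is on runs oberon_hp_pwroff(), unless a power fault
			 * is latched, in which case power is simply dropped.
			 * Any other write goes straight to TLU_SLOT_CONTROL.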
3350			 */
3351			pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
3352			pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
3353
3354			if (!pwr_off && !pwr_state_on)
3355				ret = oberon_hp_pwron(csr_base);
3356			else if (pwr_off && pwr_state_on) {
3357				pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
3358				    (1ull << TLU_SLOT_STATUS_PWFD);
3359
3360				if (pwr_fault) {
3361					DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
3362					    "off because of power fault\n");
3363					CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3364				}
3365				else
3366					ret = oberon_hp_pwroff(csr_base);
3367			} else
3368				CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
3369			break;
3370		case PCIE_SLOTSTS:
3371			CSR_XS(csr_base, TLU_SLOT_STATUS, val);
3372			break;
3373		default:
3374			DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
3375			    "unsupported offset 0x%lx\n", off);
3376			ret = (uint_t)DDI_FAILURE;
3377			break;
3378		}
3379
3380		return (ret);
3381	}
3382
3383	int
3384	hvio_hotplug_init(dev_info_t *dip, void *arg)
3385	{
3386		pciehpc_regops_t *regops = (pciehpc_regops_t *)arg;
3387		px_t *px_p = DIP_TO_STATE(dip);
3388		pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
3389		volatile uint64_t reg;
3390
3391		if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
3392			if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3393			    TLU_SLOT_CAPABILITIES, HP)) {
3394				DBG(DBG_HP, NULL, "%s%d: hotplug capable not set\n",
3395				    ddi_driver_name(dip), ddi_get_instance(dip));
3396				return (DDI_FAILURE);
3397			}
3398
3399			/* For an empty or disconnected slot, disable LUP/LDN */
3400			if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3401			    TLU_SLOT_STATUS, PSD) ||
3402			    !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3403			    HOTPLUG_CONTROL, PWREN)) {
3404
3405				reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3406				    TLU_OTHER_EVENT_LOG_ENABLE);
3407				reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3408				    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3409				    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3410				    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3411				CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
3412				    TLU_OTHER_EVENT_LOG_ENABLE, reg);
3413			}
3414
3415			regops->get = oberon_hpreg_get;
3416			regops->put = oberon_hpreg_put;
3417
3418			/* cookie is the csr_base */
3419			regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
3420
3421			return (DDI_SUCCESS);
3422		}
3423
3424		return (DDI_ENOTSUP);
3425	}
3426
3427	int
3428	hvio_hotplug_uninit(dev_info_t *dip)
3429	{
3430		px_t *px_p = DIP_TO_STATE(dip);
3431		pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
3432
3433		if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
3434			return (DDI_SUCCESS);
3435
3436		return (DDI_FAILURE);
3437	}
3438
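/*
 * Illustrative sketch, not part of the original driver: once
 * hvio_hotplug_init() has installed oberon_hpreg_get()/oberon_hpreg_put()
 * above, the generic hotplug framework can reach the Oberon slot
 * registers through the regops vector alone, e.g.:
 *
 *	pciehpc_regops_t *regops;	(as filled in by hvio_hotplug_init)
 *	uint_t sts = regops->get(regops->cookie, PCIE_SLOTSTS);
 *	(void) regops->put(regops->cookie, PCIE_SLOTSTS, sts);
 *
 * A put of PCIE_SLOTCTL with PCIE_SLOTCTL_PWR_CONTROL toggled is what
 * drives the oberon_hp_pwron()/oberon_hp_pwroff() sequences above.
 */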