/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 */
static uint64_t	pec_config_state_regs[] = {
	PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	ILU_ERROR_LOG_ENABLE,
	ILU_INTERRUPT_ENABLE,
	TLU_CONTROL,
	TLU_OTHER_EVENT_LOG_ENABLE,
	TLU_OTHER_EVENT_INTERRUPT_ENABLE,
	TLU_DEVICE_CONTROL,
	TLU_LINK_CONTROL,
	TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
	TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
	TLU_CORRECTABLE_ERROR_LOG_ENABLE,
	TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
	LPU_DEBUG_CONFIG,
	LPU_INTERRUPT_MASK,
	LPU_LINK_LAYER_CONFIG,
	LPU_FLOW_CONTROL_UPDATE_CONTROL,
	LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
	LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
	LPU_REPLAY_BUFFER_MAX_ADDRESS,
	LPU_TXLINK_RETRY_FIFO_POINTER,
	LPU_PHY_INTERRUPT_MASK,
	LPU_RECEIVE_PHY_INTERRUPT_MASK,
	LPU_TRANSMIT_PHY_INTERRUPT_MASK,
	LPU_LTSSM_CONFIG2,
	LPU_LTSSM_CONFIG3,
	LPU_LTSSM_CONFIG4,
	LPU_LTSSM_CONFIG5,
	LPU_LTSSM_INTERRUPT_MASK,
	LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
	DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	DMC_DEBUG_SELECT_FOR_PORT_A,
	DMC_DEBUG_SELECT_FOR_PORT_B
};
#define	PEC_SIZE (sizeof (pec_config_state_regs))
#define	PEC_KEYS (PEC_SIZE / sizeof (uint64_t))

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))
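/*
 * These lists are presumably consumed in list order by the suspend and
 * resume code (note the msiq_suspend()/msiq_resume() declarations
 * below).  A minimal sketch of that pattern, with the save area name
 * invented for illustration:
 *
 *	uint64_t pec_save[PEC_KEYS];
 *
 *	for (i = 0; i < PEC_KEYS; i++)		// suspend
 *		pec_save[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
 *	for (i = 0; i < PEC_KEYS; i++)		// resume
 *		CSR_XS(csr_base, pec_config_state_regs[i], pec_save[i]);
 */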
/*
 * Registers for the CB module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t cb_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	CB_SIZE (sizeof (cb_config_state_regs))
#define	CB_KEYS (CB_SIZE / sizeof (uint64_t))

static uint64_t msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);

/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * Enable all error log bits.
	 */
	CSR_XS(xbc_csr_base, JBC_ERROR_LOG_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	/*
	 * Enable all interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	/*
	 * Emit warning for pending errors and flush the logged error
	 * status register.
	 */
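	/*
	 * JBC_ERROR_STATUS_CLEAR, like the other *_STATUS_CLEAR registers
	 * programmed in this file, appears to be write-one-to-clear: the
	 * read below captures any pending error bits, and writing -1ull
	 * then clears every bit that is currently set.
	 */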
	val = CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR);

	CSR_XS(xbc_csr_base, JBC_ERROR_STATUS_CLEAR, -1ull);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V IMU_ERROR_LOG_ENABLE Expect Kernel 0x3FF
	 */
	val = -1ull;
	CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, val);
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	/*
	 * CSR_V IMU_INTERRUPT_ENABLE Expect Kernel 0x3FF000003FF
	 */
	val = -1ull;
	CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, val);
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	/*
	 * CSR_V IMU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	/*
	 * CSR_V IMU_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V ILU_ERROR_LOG_ENABLE Expect OBP 0x10
	 */

	val = 0ull;
	val = (1ull << ILU_ERROR_LOG_ENABLE_IHB_PE);

	CSR_XS(csr_base, ILU_ERROR_LOG_ENABLE, val);
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	/*
	 * CSR_V ILU_INTERRUPT_ENABLE Expect OBP 0x1000000010
	 */

	val = (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_S) |
	    (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_P);

	CSR_XS(csr_base, ILU_INTERRUPT_ENABLE, val);
	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	/*
	 * CSR_V ILU_INTERRUPT_STATUS Expect HW 0x1000000010
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	/*
	 * CSR_V ILU_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * Disable non-posted write bit - ordering by setting
	 * NPWR_EN bit to force serialization of writes.
	 */
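	/*
	 * Note that TLU_CONTROL is read-modify-written below rather than
	 * simply overwritten, so fields already programmed by OBP (such
	 * as the completion timeout select mentioned above) are
	 * preserved.
	 */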
	val = CSR_XR(csr_base, TLU_CONTROL);

	if (pxu_p->chip_id == FIRE_VER_10) {
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT <<
		    FIRE10_TLU_CONTROL_L0S_TIM) |
		    (1ull << FIRE10_TLU_CONTROL_NPWR_EN) |
		    TLU_CONTROL_CONFIG_DEFAULT;
	} else {
		/* Default case is FIRE2.0 */
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
		    (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;
	}

	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU_OTHER_EVENT_LOG_ENABLE Expected HW 0x7FF0F
	 */

	/*
	 * First of a 'guilty five'.  The problem now is that the order
	 * seems to differ - some have the log enable first and then the
	 * interrupt enable, others have them reversed.  For
	 * now I'll do them independently before creating a common
	 * framework for them all.
	 */

	val = -1ull;
	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	/*
	 * CSR_V TLU_OTHER_EVENT_INTERRUPT_ENABLE OBP 0x7FF0F0007FF0F
	 */

	/*
	 * Second of five.  Bits [55:32] are the secondary other event
	 * interrupt enables, bits [23:0] the primary other event
	 * interrupt enables.
	 */
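	/*
	 * As with the other enable registers in this file, -1ull is
	 * written rather than the exact expected value; presumably only
	 * the implemented enable bits latch, so the register should read
	 * back as the documented 0x7FF0F0007FF0F rather than all ones.
	 */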
	val = -1ull;
	CSR_XS(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE, val);
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_OTHER_EVENT_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_OTHER_EVENT_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */

	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes which is 000b.  Bits [7:5] are Max Payload Size, which
	 * start at 128 bytes which is 000b.  This may be revisited if
	 * init_child finds greater values.
	 */
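	/*
	 * For reference, the Max Payload Size field uses the standard
	 * PCI Express power-of-two encoding, size = 128 << field
	 * (000b = 128B, 001b = 256B, ... 101b = 4096B); the same
	 * encoding indexes the timer tables in lpu_init() below.
	 * Writing 0 therefore selects the minimums described above.
	 */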
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */

	/*
	 * First of a 'guilty five'.  See note for Other Event Log.
	 */
	val = -1ull;
	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, val);
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE
	 * Expect Kernel 0x17F0110017F011
	 */

	/*
	 * Second of a 'guilty five'.  Needs the value in both bits [52:32]
	 * and bits [20:0] for primary and secondary error interrupts.
	 */
	val = -1ull;
	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, val);
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */

	/*
	 * Another set of 'guilty five'.
	 */

	val = -1ull;
	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, val);
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */

	/*
	 * Bits [44:32] for secondary error, bits [12:0] for primary errors.
	 */
	val = -1ull;
	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, val);
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 1.0 section 1.2.11.1, table 1-17.
	 */
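	/*
	 * All four timer tables below are indexed as
	 * table[max_payload][link_width]: the row is the encoded Max
	 * Payload Size (0 = 128B ... 5 = 4096B, LINK_MAX_PKT_ARR_SIZE
	 * rows) and the column is the encoded link width (0 = x1,
	 * 1 = x4, 2 = x8, 3 = x16, LINK_WIDTH_ARR_SIZE columns).  Both
	 * encodings are computed after the tables, further down in this
	 * function.
	 */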
	int fire10_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x76,  0x70,  0x58},
		{0x1A0,  0x76,  0x6B,  0x61},
		{0x22F,  0x9A,  0x6A,  0x6A},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 1.0 section 1.2.11.2, table 1-18.
	 */
	int fire10_replay_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0x2C7,  0x108, 0xF6,  0xBD},
		{0x4E0,  0x162, 0x141, 0xF1},
		{0x68D,  0x1CE, 0x102, 0x102},
		{0xC8D,  0x34E, 0x1C2, 0x1C2},
		{0x188D, 0x64E, 0x342, 0x342},
		{0x308D, 0xC4E, 0x642, 0x642}
	};

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};

	/*
	 * Get the Link Width.  See table above LINK_WIDTH_ARR_SIZE #define
	 * Only Link Widths of x1, x4, and x8 are supported.
	 * If an unsupported width is reported, default to the x1 entry.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}

	/*
	 * Get the Max Payload Size.
	 * See table above LINK_MAX_PKT_ARR_SIZE #define
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		max_payload = CSR_FR(csr_base,
		    FIRE10_LPU_LINK_LAYER_CONFIG, MAX_PAYLOAD);
	} else {
		/* Default case is FIRE2.0 */
		max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
		    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
	}

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
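	/*
	 * Worked example: a x8 link (link_width == 2 after the
	 * conversion above) with a 256-byte Max Payload Size
	 * (max_payload == 1) selects acknak_timer_table[1][2] == 0x6B
	 * and replay_timer_table[1][2] == 0x192 on Fire 2.0.
	 */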
	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default 0x10, means a width of x16.  The problem is
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_MASK Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as timer0 register above, except bits [14:0]
	 * hold the timer value for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_acknak_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = acknak_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base,
	    LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_replay_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = replay_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_LAYER_INTERRUPT_AND_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_MASK Expect HW 0x0
	 */

	val = 0ull;
	CSR_XS(csr_base, LPU_PHY_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * Programming model needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_MASK Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_MASK Expect HW 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for LTSSM 8 ns timeout value and
	 * LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
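	/*
	 * For what it's worth, the expected values are consistent with
	 * these timers counting 4 ns (250 MHz) ticks: the 12 ms value
	 * below is 0x2DC6C0 == 3,000,000 ticks * 4 ns == 12 ms, and the
	 * 2 ms value written to LPU_LTSSM_CONFIG3 below is 0x7A120 ==
	 * 500,000 ticks.
	 */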
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 *
	 * XXX fix LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT &
	 * LPU_LTSSM_CONFIG4_N_FTS_DEFAULT in px_pec.h
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_MASK Expect HW 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_MASK Expect OBP 0x0
	 */

	/*
	 * Reset value masks all interrupts.  This will be changed
	 * to enable all interrupts.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE
	 * Expect OBP 0x8000000000000003
	 */

	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);
	lpu_init(csr_base, pxu_p);
	dmc_init(csr_base, pxu_p);

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE
	 * Expect Kernel 0x800000000000000F
	 */

	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
	uint_t		obp_tsb_entries, obp_tsb_size;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	val = CSR_XR(csr_base, MMU_TSB_CONTROL);

	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */

	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);
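	/*
	 * The loop above computes the encoded TSB size: the largest i in
	 * [0, 8] such that tsb_size >= (0x2000 << i).  For example, a
	 * 64KB TSB (8K TTEs) yields i == 3, since 0x2000 << 3 == 64KB.
	 */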

	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * Enable all primary and secondary interrupts.
	 */
	val = -1ull;
	CSR_XS(csr_base, MMU_INTERRUPT_ENABLE, val);
}

/*
 * Generic IOMMU Services
 */

/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages, io_attributes_t io_attributes,
    void *addr, size_t pfn_index, int flag)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	attr = MMU_TTE_V;
	int		i;

	if (io_attributes & PCI_MAP_ATTR_WRITE)
		attr |= MMU_TTE_W;

	if (flag == MMU_MAP_MP) {
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;

		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
			px_iopfn_t	pfn = PX_GET_MP_PFN(mp, pfn_index);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	} else {
		caddr_t	a = (caddr_t)addr;

		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	int		i;

	for (i = 0; i < pages; i++, tsb_index++) {
		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	*tte_addr;
	uint64_t	ret = H_EOK;

	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

	if (*tte_addr & MMU_TTE_V) {
		*r_addr_p = MMU_TTETOPA(*tte_addr);
		*attributes_p = (*tte_addr & MMU_TTE_W) ?
		    PCI_MAP_ATTR_WRITE : PCI_MAP_ATTR_READ;
	} else {
		*r_addr_p = 0;
		*attributes_p = 0;
		ret = H_ENOMAP;
	}

	return (ret);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t pfn = MMU_BTOP(ra);

	*io_addr_p = MMU_BYPASS_BASE | ra |
	    (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);

	return (H_EOK);
}

/*
 * Generic IO Interrupt Services
 */

/*
 * Converts a device specific interrupt number given by the
 * arguments devhandle and devino into a system specific ino.
 */
/* ARGSUSED */
uint64_t
hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
    sysino_t *sysino)
{
	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
		return (H_ENOINTR);
	}

	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);

	return (H_EOK);
}
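/*
 * Judging from the IGN programming in hvio_msiq_init() below, a sysino
 * combines the interrupt group number derived from the port id with the
 * devino in the low INO_BITS bits; SYSINO_TO_DEVINO() in the routines
 * that follow simply recovers the devino half.
 */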
/*
 * Returns state in intr_valid_state if the interrupt defined by sysino
 * is valid (enabled) or not-valid (disabled).
 */
uint64_t
hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
		*intr_valid_state = INTR_VALID;
	} else {
		*intr_valid_state = INTR_NOTVALID;
	}

	return (H_EOK);
}

/*
 * Sets the 'valid' state of the interrupt defined by
 * the argument sysino to the state defined by the
 * argument intr_valid_state.
 */
uint64_t
hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	switch (intr_valid_state) {
	case INTR_VALID:
		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
		break;
	case INTR_NOTVALID:
		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
		break;
	default:
		return (H_EINVAL);
	}

	return (H_EOK);
}

/*
 * Returns the current state of the interrupt given by the sysino
 * argument.
 */
uint64_t
hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t *intr_state)
{
	intr_state_t state;

	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);

	switch (state) {
	case INTERRUPT_IDLE_STATE:
		*intr_state = INTR_IDLE_STATE;
		break;
	case INTERRUPT_RECEIVED_STATE:
		*intr_state = INTR_RECEIVED_STATE;
		break;
	case INTERRUPT_PENDING_STATE:
		*intr_state = INTR_DELIVERED_STATE;
		break;
	default:
		return (H_EINVAL);
	}

	return (H_EOK);
}

/*
 * Sets the current state of the interrupt given by the sysino
 * argument to the value given in the argument intr_state.
 *
 * Note: Setting the state to INTR_IDLE clears any pending
 * interrupt for sysino.
 */
uint64_t
hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t intr_state)
{
	intr_state_t state;

	switch (intr_state) {
	case INTR_IDLE_STATE:
		state = INTERRUPT_IDLE_STATE;
		break;
	case INTR_DELIVERED_STATE:
		state = INTERRUPT_PENDING_STATE;
		break;
	default:
		return (H_EINVAL);
	}

	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);

	return (H_EOK);
}

/*
 * Returns the cpuid that is the current target of the
 * interrupt given by the sysino argument.
 *
 * The cpuid value returned is undefined if the target
 * has not been set via intr_settarget.
 */
uint64_t
hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
{
	*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);

	return (H_EOK);
}
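
/*
 * Typical usage sketch (illustrative, not taken verbatim from a caller):
 * a handler that has finished servicing a mondo moves the hardware
 * state machine back to idle so the next mondo can be dispatched:
 *
 *	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
 *
 * while a retarget operation would typically clear the valid bit,
 * rewrite the target via hvio_intr_settarget() below, force the state
 * to idle, and only then re-enable the mapping with
 * hvio_intr_setvalid(..., INTR_VALID).
 */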
/*
 * Set the target cpu for the interrupt defined by the argument
 * sysino to the target cpu value defined by the argument cpuid.
 */
uint64_t
hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t val, intr_controller;
	uint32_t ino = SYSINO_TO_DEVINO(sysino);

	/*
	 * For now, we assign interrupt controllers in a round
	 * robin fashion.  Later, we may need to come up with
	 * a more efficient assignment algorithm.
	 */
	intr_controller = 0x1ull << (cpuid % 4);

	val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
	    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
	    ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
	    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));

	/* For EQ interrupts, set DATA MONDO bit */
	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);

	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);

	return (H_EOK);
}

/*
 * MSIQ Functions:
 */
uint64_t
hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
	    (uint64_t)pxu_p->msiq_mapped_p);
	DBG(DBG_IB, NULL,
	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));

	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
	    (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
	DBG(DBG_IB, NULL, "hvio_msiq_init: "
	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));

	return (H_EOK);
}

uint64_t
hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
		*msiq_valid_state = PCI_MSIQ_INVALID;
		break;
	case EQ_ACTIVE_STATE:
	case EQ_ERROR_STATE:
		*msiq_valid_state = PCI_MSIQ_VALID;
		break;
	default:
		ret = H_EIO;
		break;
	}

	return (ret);
}

uint64_t
hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msiq_valid_state) {
	case PCI_MSIQ_INVALID:
		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
		    msiq_id, ENTRIES_DIS);
		break;
	case PCI_MSIQ_VALID:
		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
		    msiq_id, ENTRIES_EN);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}

uint64_t
hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
	case EQ_ACTIVE_STATE:
		*msiq_state = PCI_MSIQ_STATE_IDLE;
		break;
	case EQ_ERROR_STATE:
		*msiq_state = PCI_MSIQ_STATE_ERROR;
		break;
	default:
		ret = H_EIO;
	}

	return (ret);
}
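
/*
 * Event queue state machine, as reflected by the accessors above and
 * hvio_msiq_setstate() below (a summary for reference, not an
 * authoritative hardware spec):
 *
 *	IDLE   --ENTRIES_EN-->      ACTIVE  (hvio_msiq_setvalid(VALID))
 *	ACTIVE --ENTRIES_ENOVERR--> ERROR   (setstate(ERROR), or hardware
 *	                                     on queue overflow)
 *	ERROR  --ENTRIES_E2I-->     IDLE    (setstate(IDLE))
 *
 * Any other requested transition is rejected with H_EIO below.
 */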
uint64_t
hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
		if (msiq_state == PCI_MSIQ_STATE_ERROR)
			ret = H_EIO;
		break;
	case EQ_ACTIVE_STATE:
		if (msiq_state == PCI_MSIQ_STATE_ERROR)
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
			    msiq_id, ENTRIES_ENOVERR);
		else
			ret = H_EIO;
		break;
	case EQ_ERROR_STATE:
		if (msiq_state == PCI_MSIQ_STATE_IDLE)
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
			    msiq_id, ENTRIES_E2I);
		else
			ret = H_EIO;
		break;
	default:
		ret = H_EIO;
	}

	return (ret);
}

uint64_t
hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
	    msiq_id, ENTRIES_HEAD);

	return (H_EOK);
}

uint64_t
hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
	    ENTRIES_HEAD, msiq_head);

	return (H_EOK);
}

uint64_t
hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
	    msiq_id, ENTRIES_TAIL);

	return (H_EOK);
}

/*
 * MSI Functions:
 */
uint64_t
hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
{
	/* PCI MEM 32 resources to perform 32 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));

	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));

	return (H_EOK);
}

uint64_t
hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQNUM);

	return (H_EOK);
}

uint64_t
hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t msiq_id)
{
	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
	    ENTRIES_EQNUM, msiq_id);

	return (H_EOK);
}

uint64_t
hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_V);

	return (H_EOK);
}

uint64_t
hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msi_valid_state) {
	case PCI_MSI_VALID:
		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
		    ENTRIES_V);
		break;
	case PCI_MSI_INVALID:
		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
		    ENTRIES_V);
		break;
	default:
		ret = H_EINVAL;
	}

	return (ret);
}

uint64_t
hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQWR_N);

	return (H_EOK);
}
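
/*
 * A typical (hypothetical) MSI bring-up sequence using the accessors
 * above, as a caller in the px nexus might issue it:
 *
 *	(void) hvio_msi_setmsiq(dev_hdl, msi_num, msiq_id);
 *	(void) hvio_msi_setstate(dev_hdl, msi_num, PCI_MSI_STATE_IDLE);
 *	(void) hvio_msi_setvalid(dev_hdl, msi_num, PCI_MSI_VALID);
 *
 * i.e. bind the MSI to an event queue, clear any stale EQWR_N state
 * via hvio_msi_setstate() (below), and set the valid bit last so no
 * interrupt can fire half-configured.
 */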
uint64_t
hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t ret = H_EOK;

	switch (msi_state) {
	case PCI_MSI_STATE_IDLE:
		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
		    ENTRIES_EQWR_N);
		break;
	case PCI_MSI_STATE_DELIVERED:
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}

/*
 * MSG Functions:
 */
uint64_t
hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
		break;
	case PCIE_PME_ACK_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
		    EQNUM);
		break;
	case PCIE_CORR_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
		break;
	case PCIE_NONFATAL_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
		    EQNUM);
		break;
	case PCIE_FATAL_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}

uint64_t
hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_PME_ACK_MSG:
		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_CORR_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_NONFATAL_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_FATAL_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}

uint64_t
hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
		break;
	case PCIE_PME_ACK_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
		    PME_TO_ACK_MAPPING, V);
		break;
	case PCIE_CORR_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
		break;
	case PCIE_NONFATAL_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
		    ERR_NONFATAL_MAPPING, V);
		break;
	case PCIE_FATAL_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
		    V);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
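
/*
 * Message-type to mapping-register correspondence used throughout the
 * MSG functions, summarized here for reference:
 *
 *	PCIE_PME_MSG		PM_PME_MAPPING
 *	PCIE_PME_ACK_MSG	PME_TO_ACK_MAPPING
 *	PCIE_CORR_MSG		ERR_COR_MAPPING
 *	PCIE_NONFATAL_MSG	ERR_NONFATAL_MAPPING
 *	PCIE_FATAL_MSG		ERR_FATAL_MAPPING
 */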
uint64_t
hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msg_valid_state) {
	case PCIE_MSG_VALID:
		switch (msg_type) {
		case PCIE_PME_MSG:
			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
			break;
		case PCIE_PME_ACK_MSG:
			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
			break;
		case PCIE_CORR_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
			break;
		case PCIE_NONFATAL_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
			break;
		case PCIE_FATAL_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
			break;
		default:
			ret = H_EINVAL;
			break;
		}
		break;
	case PCIE_MSG_INVALID:
		switch (msg_type) {
		case PCIE_PME_MSG:
			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
			break;
		case PCIE_PME_ACK_MSG:
			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
			break;
		case PCIE_CORR_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
			break;
		case PCIE_NONFATAL_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
			break;
		case PCIE_FATAL_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
			break;
		default:
			ret = H_EINVAL;
			break;
		}
		break;
	default:
		ret = H_EINVAL;
	}

	return (ret);
}

/*
 * Suspend/Resume Functions:
 *	(pec, mmu, ib)
 *	cb
 * Registers saved have all been touched in the XXX_init functions.
 */
uint64_t
hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	uint64_t *config_state;
	int total_size;
	int i;

	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
		return (H_EIO);

	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	config_state = kmem_zalloc(total_size, KM_NOSLEEP);

	if (config_state == NULL) {
		/* Don't leak the MSIQ state saved by msiq_suspend() above */
		kmem_free(pxu_p->msiq_config_state,
		    MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE);
		pxu_p->msiq_config_state = NULL;
		return (H_EIO);
	}

	/*
	 * Soft state for suspend/resume from pxu_t:
	 *	uint64_t	*pec_config_state;
	 *	uint64_t	*mmu_config_state;
	 *	uint64_t	*ib_intr_map;
	 *	uint64_t	*ib_config_state;
	 *	uint64_t	*xcb_config_state;
	 */

	/* Save the PEC configuration states */
	pxu_p->pec_config_state = config_state;
	for (i = 0; i < PEC_KEYS; i++) {
		pxu_p->pec_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
	}

	/* Save the MMU configuration states */
	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
	for (i = 0; i < MMU_KEYS; i++) {
		pxu_p->mmu_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
	}

	/* Save the interrupt mapping registers */
	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		pxu_p->ib_intr_map[i] =
		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
	}

	/* Save the IB configuration states */
	pxu_p->ib_config_state =
	    pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
	for (i = 0; i < IB_KEYS; i++) {
		pxu_p->ib_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
	}

	return (H_EOK);
}
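
/*
 * Layout of the single save buffer carved up by hvio_suspend() above
 * (regions in order, sizes from the *_SIZE macros):
 *
 *	pec_config_state	PEC_SIZE	(PEC_KEYS entries)
 *	mmu_config_state	MMU_SIZE	(MMU_KEYS entries)
 *	ib_intr_map		IB_MAP_SIZE	(INTERRUPT_MAPPING_ENTRIES)
 *	ib_config_state		IB_SIZE		(IB_KEYS entries)
 *
 * hvio_resume() below restores the regions in the order it needs them
 * and releases the whole buffer with a single kmem_free() of total_size.
 */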
void
hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
{
	int total_size;
	sysino_t sysino;
	int i;

	/* Make sure that suspend actually did occur */
	if (!pxu_p->pec_config_state) {
		return;
	}

	/* Restore IB configuration states */
	for (i = 0; i < IB_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
		    pxu_p->ib_config_state[i]);
	}

	/*
	 * Restore the interrupt mapping registers
	 * and make sure the intrs are idle.
	 */
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
		    pxu_p->ib_intr_map[i]);
	}

	/* Restore MMU configuration states */
	/* Clear the TTE cache first */
	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);

	for (i = 0; i < MMU_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
		    pxu_p->mmu_config_state[i]);
	}

	/* Restore PEC configuration states */
	/* Make sure all reset bits are low until error is detected */
	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);

	for (i = 0; i < PEC_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
		    pxu_p->pec_config_state[i]);
	}

	/* Enable PCI-E interrupt */
	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);

	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);

	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	kmem_free(pxu_p->pec_config_state, total_size);

	pxu_p->pec_config_state = NULL;
	pxu_p->mmu_config_state = NULL;
	pxu_p->ib_config_state = NULL;
	pxu_p->ib_intr_map = NULL;

	msiq_resume(dev_hdl, pxu_p);
}

uint64_t
hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	uint64_t *config_state;
	int i;

	config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);

	if (config_state == NULL) {
		return (H_EIO);
	}

	/* Save the configuration states */
	pxu_p->xcb_config_state = config_state;
	for (i = 0; i < CB_KEYS; i++) {
		pxu_p->xcb_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
	}

	return (H_EOK);
}
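
/*
 * Ordering note for hvio_resume() above: each interrupt is forced to
 * the idle state before its mapping register is rewritten, so a stale
 * pending state cannot fire through a freshly restored (and possibly
 * valid) mapping; likewise LPU_RESET is driven to 0 before the saved
 * PEC state is replayed, so the link comes back with no reset bits
 * held high until an error is actually detected.
 */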
void
hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
    devino_t devino, pxu_t *pxu_p)
{
	sysino_t sysino;
	int i;

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);

	ASSERT(pxu_p->xcb_config_state);

	/* Restore the configuration states */
	for (i = 0; i < CB_KEYS; i++) {
		CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
		    pxu_p->xcb_config_state[i]);
	}

	/* Enable XBC interrupt */
	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);

	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);

	kmem_free(pxu_p->xcb_config_state, CB_SIZE);

	pxu_p->xcb_config_state = NULL;
}

static uint64_t
msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	size_t bufsz;
	volatile uint64_t *cur_p;
	int i;

	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
	    NULL)
		return (H_EIO);

	cur_p = pxu_p->msiq_config_state;

	/* Save each EQ state */
	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);

	/* Save MSI mapping registers */
	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);

	/* Save all other MSIQ registers */
	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);

	return (H_EOK);
}

static void
msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	size_t bufsz;
	uint64_t *cur_p;
	int i;

	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
	cur_p = pxu_p->msiq_config_state;

	/*
	 * Initialize the EQ base address register and the
	 * Interrupt Mondo Data 0 register.
	 */
	(void) hvio_msiq_init(dev_hdl, pxu_p);

	/* Restore EQ states */
	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
		if (((*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK) ==
		    EQ_ACTIVE_STATE) {
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
			    i, ENTRIES_EN);
		}
	}

	/* Restore MSI mapping */
	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);

	/*
	 * Restore all other registers.  The MSI 32 bit address and
	 * MSI 64 bit address are restored as part of this.
	 */
	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);

	kmem_free(pxu_p->msiq_config_state, bufsz);
	pxu_p->msiq_config_state = NULL;
}

/*
 * Sends a PME_Turn_Off message to put the link in the L2/L3 ready state.
 * Called by px_goto_l23ready().
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
px_send_pme_turnoff(caddr_t csr_base)
{
	volatile uint64_t reg;

	/* TBD: Wait for link to be in L1 state (link status reg) */

	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);

	/* If a PME_Turn_Off is already pending, return failure */
	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
		return (DDI_FAILURE);
	}

	/* Write the PME_Turn_Off generate bit to broadcast the message */
	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);

	return (DDI_SUCCESS);
}
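
/*
 * Note that px_send_pme_turnoff() above only initiates the broadcast:
 * it sets the PTO bit and returns immediately.  The caller,
 * px_goto_l23ready(), is presumably responsible for polling until the
 * link actually reaches the L2/L3 ready state, and for handling the
 * DDI_FAILURE case where a previous PME_Turn_Off is still pending.
 */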