/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbif_v6_3_1.h"

#include "nbif/nbif_6_3_1_offset.h"
#include "nbif/nbif_6_3_1_sh_mask.h"
#include "pcie/pcie_6_1_0_offset.h"
#include "pcie/pcie_6_1_0_sh_mask.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbif_v6_3_1_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbif_v6_3_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
			     BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}

static u32 nbif_v6_3_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}

static void nbif_v6_3_1_sdma_doorbell_range(struct amdgpu_device *adev,
					    int instance, bool use_doorbell,
					    int doorbell_index,
					    int doorbell_size)
{
	if (instance == 0) {
		u32 doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL);

		if (use_doorbell) {
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_ENABLE,
						       0x1);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_AWID,
						       0xe);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_OFFSET,
						       doorbell_index);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_SIZE,
						       doorbell_size);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE,
						       0x3);
		} else {
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_SIZE,
						       0);
		}

		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL, doorbell_range);
	}
}
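/*
 * VCN instance 0 is routed through S2A doorbell entry 4 and instance 1
 * through entry 5; the two entries appear to share the same field layout,
 * which is why the PORT4 field macros are reused when programming entry 5.
 */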
static void nbif_v6_3_1_vcn_doorbell_range(struct amdgpu_device *adev,
					   bool use_doorbell, int doorbell_index,
					   int instance)
{
	u32 doorbell_range;

	if (instance)
		doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL);
	else
		doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_ENABLE,
					       0x1);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_AWID,
					       instance ? 0x7 : 0x4);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_SIZE,
					       8);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE,
					       instance ? 0x7 : 0x4);
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_SIZE,
					       0);
	}

	if (instance)
		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL, doorbell_range);
	else
		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL, doorbell_range);
}

static void nbif_v6_3_1_gc_doorbell_init(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL, 0x30000007);
	WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d);
}

static void nbif_v6_3_1_enable_doorbell_aperture(struct amdgpu_device *adev,
						 bool enable)
{
	WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
			      BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
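/*
 * The self-ring GPA aperture presumably lets the chip target its own
 * doorbell BAR; when enabled it is pointed at the doorbell aperture base
 * kept in adev->doorbell.base.
 */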
static void
nbif_v6_3_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
					      bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbif_v6_3_1_ih_doorbell_range(struct amdgpu_device *adev,
					  bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_ENABLE,
						  0x1);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_AWID,
						  0x0);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_OFFSET,
						  doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_SIZE,
						  2);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
						  0x0);
	} else {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_SIZE,
						  0);
	}

	WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range);
}

static void nbif_v6_3_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
	/*
	 * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}
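/*
 * Medium grain clock gating and light sleep are not driver-controlled on
 * NBIF 6.3.1 (presumably handled by firmware), so the callbacks below are
 * intentionally empty.
 */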
static void
nbif_v6_3_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
}

static void
nbif_v6_3_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					    bool enable)
{
}

static void
nbif_v6_3_1_get_clockgating_state(struct amdgpu_device *adev,
				  u64 *flags)
{
}

static u32 nbif_v6_3_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}

static u32 nbif_v6_3_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}

static u32 nbif_v6_3_1_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}

static u32 nbif_v6_3_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}

const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

static void nbif_v6_3_1_init_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
	data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
	WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}

static u32 nbif_v6_3_1_get_rom_offset(struct amdgpu_device *adev)
{
	u32 data, rom_offset;

	data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL);
	rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET);

	return rom_offset;
}

#ifdef CONFIG_PCIEASPM
static void nbif_v6_3_1_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;
	u16 devctl2;

	def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	data = 0x35EB;
	data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data);

	pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2);

	if (adev->pdev->ltr_path == (devctl2 & PCI_EXP_DEVCTL2_LTR_EN))
		return;

	if (adev->pdev->ltr_path)
		pcie_capability_set_word(adev->pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);
	else
		pcie_capability_clear_word(adev->pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);
}
#endif
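/*
 * ASPM programming sequence: the L0s/L1 inactivity timers and the vlink
 * timers are cleared first, the link-control and LTR registers are
 * programmed, and then the timers are restored with their final values
 * (L1 inactivity 0x9, vlink idle timer 0x5DE0). The exact timer values
 * are presumably taken from the hardware programming guide.
 */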
static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	uint32_t def, data;
	u16 devctl2, ltr;

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

	/*
	 * Make sure LTR is disabled before the timers are reprogrammed.
	 * pcie_capability_set_word() can only set bits, so the enable bit
	 * has to be cleared explicitly.
	 */
	pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2);
	if (devctl2 & PCI_EXP_DEVCTL2_LTR_EN)
		pcie_capability_clear_word(adev->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_LTR_EN);

	ltr = pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_LTR);
	if (ltr)
		pci_write_config_dword(adev->pdev, ltr + PCI_LTR_MAX_SNOOP_LAT, 0x10011001);

#if 0
	/* TODO: should regPSWUSP0_PCIE_LC_CNTL2 be replaced by PCIE_LC_CNTL2 or another register? */
	def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data);
#endif
	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4);
	data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL);
	data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data);

	nbif_v6_3_1_program_ltr(adev);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
	data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
	data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);
#endif
}

#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)

static void nbif_v6_3_1_set_reg_remap(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	} else {
		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
		adev->rmmio_remap.bus_addr = 0;
	}
}
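/* callback table used on bare metal; SR-IOV uses nbif_v6_3_1_sriov_funcs below */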
const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
	.get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
	.get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
	.get_rev_id = nbif_v6_3_1_get_rev_id,
	.mc_access_enable = nbif_v6_3_1_mc_access_enable,
	.get_memsize = nbif_v6_3_1_get_memsize,
	.sdma_doorbell_range = nbif_v6_3_1_sdma_doorbell_range,
	.vcn_doorbell_range = nbif_v6_3_1_vcn_doorbell_range,
	.gc_doorbell_init = nbif_v6_3_1_gc_doorbell_init,
	.enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbif_v6_3_1_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
	.get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
	.ih_control = nbif_v6_3_1_ih_control,
	.init_registers = nbif_v6_3_1_init_registers,
	.remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
	.get_rom_offset = nbif_v6_3_1_get_rom_offset,
	.program_aspm = nbif_v6_3_1_program_aspm,
	.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};

/*
 * Under SR-IOV the doorbell apertures are presumably owned and programmed
 * by the host, so the VF callbacks below are intentionally no-ops.
 */
static void nbif_v6_3_1_sriov_ih_doorbell_range(struct amdgpu_device *adev,
						bool use_doorbell, int doorbell_index)
{
}

static void nbif_v6_3_1_sriov_sdma_doorbell_range(struct amdgpu_device *adev,
						  int instance, bool use_doorbell,
						  int doorbell_index,
						  int doorbell_size)
{
}

static void nbif_v6_3_1_sriov_vcn_doorbell_range(struct amdgpu_device *adev,
						 bool use_doorbell,
						 int doorbell_index, int instance)
{
}

static void nbif_v6_3_1_sriov_gc_doorbell_init(struct amdgpu_device *adev)
{
}

const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
	.get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
	.get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
	.get_rev_id = nbif_v6_3_1_get_rev_id,
	.mc_access_enable = nbif_v6_3_1_mc_access_enable,
	.get_memsize = nbif_v6_3_1_get_memsize,
	.sdma_doorbell_range = nbif_v6_3_1_sriov_sdma_doorbell_range,
	.vcn_doorbell_range = nbif_v6_3_1_sriov_vcn_doorbell_range,
	.gc_doorbell_init = nbif_v6_3_1_sriov_gc_doorbell_init,
	.enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbif_v6_3_1_sriov_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
	.get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
	.ih_control = nbif_v6_3_1_ih_control,
	.init_registers = nbif_v6_3_1_init_registers,
	.remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
	.get_rom_offset = nbif_v6_3_1_get_rom_offset,
	.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};
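/*
 * RAS err_event_athub interrupt plumbing. Since the bif ring is not
 * enabled, the interrupt status is checked and cleared directly from
 * BIF_BX0_BIF_DOORBELL_INT_CNTL (see the no-bifring handler below).
 */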
static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
							 struct amdgpu_irq_src *src,
							 unsigned int type,
							 enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_doorbell_int_cntl;

	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
	bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
					      BIF_BX0_BIF_DOORBELL_INT_CNTL,
					      RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
					      (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);

	return 0;
}

static int nbif_v6_3_1_process_err_event_athub_irq(struct amdgpu_device *adev,
						   struct amdgpu_irq_src *source,
						   struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring. Since the bif ring is not enabled, just leave the
	 * process callback as a dummy one.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbif_v6_3_1_ras_err_event_athub_irq_funcs = {
	.set = nbif_v6_3_1_set_ras_err_event_athub_irq_state,
	.process = nbif_v6_3_1_process_err_event_athub_irq,
};

static void nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_int_cntl;

	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_int_cntl,
			  BIF_BX0_BIF_DOORBELL_INT_CNTL,
			  RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* the driver has to clear the interrupt status when the bif ring is disabled */
		bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
						      BIF_BX0_BIF_DOORBELL_INT_CNTL,
						      RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
		amdgpu_ras_global_ras_isr(adev);
	}
}

static int nbif_v6_3_1_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbif_v6_3_1_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register the ras err event athub interrupt;
	 * nbif v6_3_1 uses the same irq source as nbio v7_4
	 */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

struct amdgpu_nbio_ras nbif_v6_3_1_ras = {
	.handle_ras_err_event_athub_intr_no_bifring =
		nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_err_event_athub_interrupt =
		nbif_v6_3_1_init_ras_err_event_athub_interrupt,
};