/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21 * 22 */ 23 #include "amdgpu.h" 24 #include "nbio_v6_1.h" 25 26 #include "nbio/nbio_6_1_default.h" 27 #include "nbio/nbio_6_1_offset.h" 28 #include "nbio/nbio_6_1_sh_mask.h" 29 #include "nbio/nbio_6_1_smn.h" 30 #include "vega10_enum.h" 31 #include <uapi/linux/kfd_ioctl.h> 32 33 #define smnPCIE_LC_CNTL 0x11140280 34 #define smnPCIE_LC_CNTL3 0x111402d4 35 #define smnPCIE_LC_CNTL6 0x111402ec 36 #define smnPCIE_LC_CNTL7 0x111402f0 37 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c 38 #define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L 39 #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL 40 #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L 41 #define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530 42 #define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c 43 #define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 44 #define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 45 #define smnRCC_BIF_STRAP2 0x10123488 46 #define smnRCC_BIF_STRAP3 0x1012348c 47 #define smnRCC_BIF_STRAP5 0x10123494 48 #define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L 49 #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL 50 #define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L 51 #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0 52 #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10 53 #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0 54 55 static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev) 56 { 57 WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, 58 adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); 59 WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, 60 adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); 61 } 62 63 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) 64 { 65 u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); 66 67 tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; 68 tmp >>= 
RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; 69 70 return tmp; 71 } 72 73 static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) 74 { 75 if (enable) 76 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 77 BIF_FB_EN__FB_READ_EN_MASK | 78 BIF_FB_EN__FB_WRITE_EN_MASK); 79 else 80 WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); 81 } 82 83 static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) 84 { 85 return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE); 86 } 87 88 static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance, 89 bool use_doorbell, int doorbell_index, int doorbell_size) 90 { 91 u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : 92 SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); 93 94 u32 doorbell_range = RREG32(reg); 95 96 if (use_doorbell) { 97 doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); 98 doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size); 99 } else 100 doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); 101 102 WREG32(reg, doorbell_range); 103 104 } 105 106 static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev, 107 bool enable) 108 { 109 WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 
1 : 0); 110 } 111 112 static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, 113 bool enable) 114 { 115 u32 tmp = 0; 116 117 if (enable) { 118 tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | 119 REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | 120 REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); 121 122 WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, 123 lower_32_bits(adev->doorbell.base)); 124 WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, 125 upper_32_bits(adev->doorbell.base)); 126 } 127 128 WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp); 129 } 130 131 132 static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev, 133 bool use_doorbell, int doorbell_index) 134 { 135 u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE); 136 137 if (use_doorbell) { 138 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); 139 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, 140 BIF_IH_DOORBELL_RANGE, SIZE, 6); 141 } else 142 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0); 143 144 WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range); 145 } 146 147 static void nbio_v6_1_ih_control(struct amdgpu_device *adev) 148 { 149 u32 interrupt_cntl; 150 151 /* setup interrupt control */ 152 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8); 153 interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL); 154 /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi 155 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN 156 */ 157 interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 
0); 158 /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ 159 interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); 160 WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl); 161 } 162 163 static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, 164 bool enable) 165 { 166 uint32_t def, data; 167 168 def = data = RREG32_PCIE(smnCPM_CONTROL); 169 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) { 170 data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | 171 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | 172 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK | 173 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | 174 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | 175 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | 176 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); 177 } else { 178 data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | 179 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | 180 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK | 181 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | 182 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | 183 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | 184 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); 185 } 186 187 if (def != data) 188 WREG32_PCIE(smnCPM_CONTROL, data); 189 } 190 191 static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, 192 bool enable) 193 { 194 uint32_t def, data; 195 196 def = data = RREG32_PCIE(smnPCIE_CNTL2); 197 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { 198 data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 199 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 200 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); 201 } else { 202 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 203 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 204 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); 205 } 206 207 if (def != data) 208 WREG32_PCIE(smnPCIE_CNTL2, data); 209 } 210 211 static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, 212 u64 *flags) 213 { 214 int data; 215 216 /* 
AMD_CG_SUPPORT_BIF_MGCG */ 217 data = RREG32_PCIE(smnCPM_CONTROL); 218 if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK) 219 *flags |= AMD_CG_SUPPORT_BIF_MGCG; 220 221 /* AMD_CG_SUPPORT_BIF_LS */ 222 data = RREG32_PCIE(smnPCIE_CNTL2); 223 if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK) 224 *flags |= AMD_CG_SUPPORT_BIF_LS; 225 } 226 227 static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev) 228 { 229 return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ); 230 } 231 232 static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev) 233 { 234 return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE); 235 } 236 237 static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev) 238 { 239 return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2); 240 } 241 242 static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev) 243 { 244 return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); 245 } 246 247 const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { 248 .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, 249 .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, 250 .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, 251 .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK, 252 .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK, 253 .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK, 254 .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK, 255 .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK, 256 .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK, 257 .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK, 258 .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK, 259 .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 260 }; 261 262 static void nbio_v6_1_init_registers(struct amdgpu_device *adev) 263 { 264 uint32_t def, data; 265 266 def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); 267 data = REG_SET_FIELD(data, 
PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); 268 data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); 269 270 if (def != data) 271 WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); 272 273 def = data = RREG32_PCIE(smnPCIE_CI_CNTL); 274 data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); 275 276 if (def != data) 277 WREG32_PCIE(smnPCIE_CI_CNTL, data); 278 } 279 280 #ifdef CONFIG_PCIEASPM 281 static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) 282 { 283 uint32_t def, data; 284 285 WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); 286 287 def = data = RREG32_PCIE(smnRCC_BIF_STRAP2); 288 data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; 289 if (def != data) 290 WREG32_PCIE(smnRCC_BIF_STRAP2, data); 291 292 def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); 293 data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; 294 if (def != data) 295 WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); 296 297 def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 298 data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 299 if (def != data) 300 WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 301 } 302 #endif 303 304 static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) 305 { 306 #ifdef CONFIG_PCIEASPM 307 uint32_t def, data; 308 309 def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 310 data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; 311 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 312 data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; 313 if (def != data) 314 WREG32_PCIE(smnPCIE_LC_CNTL, data); 315 316 def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); 317 data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; 318 if (def != data) 319 WREG32_PCIE(smnPCIE_LC_CNTL7, data); 320 321 def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); 322 data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; 323 if (def != data) 324 WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); 325 326 def = data = 
RREG32_PCIE(smnPCIE_LC_CNTL3); 327 data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 328 if (def != data) 329 WREG32_PCIE(smnPCIE_LC_CNTL3, data); 330 331 def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 332 data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; 333 data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; 334 if (def != data) 335 WREG32_PCIE(smnRCC_BIF_STRAP3, data); 336 337 def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 338 data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; 339 if (def != data) 340 WREG32_PCIE(smnRCC_BIF_STRAP5, data); 341 342 def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 343 data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 344 if (def != data) 345 WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 346 347 WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); 348 349 def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); 350 data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | 351 PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; 352 data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; 353 if (def != data) 354 WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); 355 356 def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); 357 data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | 358 PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; 359 if (def != data) 360 WREG32_PCIE(smnPCIE_LC_CNTL6, data); 361 362 /* Don't bother about LTR if LTR is not enabled 363 * in the path */ 364 if (adev->pdev->ltr_path) 365 nbio_v6_1_program_ltr(adev); 366 367 def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 368 data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; 369 data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; 370 if (def != data) 371 WREG32_PCIE(smnRCC_BIF_STRAP3, data); 372 373 def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 374 data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; 375 if (def != data) 376 WREG32_PCIE(smnRCC_BIF_STRAP5, data); 377 378 def = data = 
RREG32_PCIE(smnPCIE_LC_CNTL); 379 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 380 data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 381 data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; 382 if (def != data) 383 WREG32_PCIE(smnPCIE_LC_CNTL, data); 384 385 def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); 386 data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 387 if (def != data) 388 WREG32_PCIE(smnPCIE_LC_CNTL3, data); 389 #endif 390 } 391 392 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) 393 394 static void nbio_v6_1_set_reg_remap(struct amdgpu_device *adev) 395 { 396 if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) { 397 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; 398 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; 399 } else { 400 adev->rmmio_remap.reg_offset = 401 SOC15_REG_OFFSET(NBIO, 0, 402 mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; 403 adev->rmmio_remap.bus_addr = 0; 404 } 405 } 406 407 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { 408 .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, 409 .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, 410 .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset, 411 .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset, 412 .get_rev_id = nbio_v6_1_get_rev_id, 413 .mc_access_enable = nbio_v6_1_mc_access_enable, 414 .get_memsize = nbio_v6_1_get_memsize, 415 .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range, 416 .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture, 417 .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture, 418 .ih_doorbell_range = nbio_v6_1_ih_doorbell_range, 419 .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating, 420 .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep, 421 .get_clockgating_state = nbio_v6_1_get_clockgating_state, 422 .ih_control = nbio_v6_1_ih_control, 423 .init_registers = 
nbio_v6_1_init_registers, 424 .remap_hdp_registers = nbio_v6_1_remap_hdp_registers, 425 .program_aspm = nbio_v6_1_program_aspm, 426 .set_reg_remap = nbio_v6_1_set_reg_remap, 427 }; 428