/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v2_0.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				 uint64_t page_table_base)
{
	/* Two registers distance between mmMMVM_CONTEXT0_* and mmMMVM_CONTEXT1_* */
	int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    offset * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    offset * vmid, upper_32_bits(page_table_base));
}

static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}
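
/*
 * Program the MC system aperture and the default pages for this hub.
 * The AGP aperture is disabled, the system aperture is bounded by the
 * VRAM range (programmed with 256KB granularity, hence the >> 18
 * shifts below), and stray accesses are steered to the scratch and
 * dummy pages.
 */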
static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Disable AGP. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * The new L1 policy blocks an SRIOV guest from writing
		 * these registers; they are programmed by the host
		 * instead, so skip programming them here.
		 */
		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     adev->gmc.vram_start >> 18);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     adev->gmc.vram_end >> 18);
	}

	/* Set default page address. */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX: for emulation, refer to the closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);
}
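
/*
 * Context 0 is the system (kernel) domain: it walks the single-level
 * GART page table (PAGE_TABLE_DEPTH = 0) and does not retry on
 * permission or invalid-page faults.
 */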
static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * 2,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * 2,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    2 * i, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    2 * i, 0x1f);
	}
}
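
/**
 * mmhub_v2_0_gart_enable - enable GART translation in the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the GART and system apertures, the TLB and L2 cache
 * controls, the system (context 0) and user (contexts 1-15) VM
 * contexts, and the invalidation engine address ranges.
 */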
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v2_0_init_gart_aperture_regs(adev);
	mmhub_v2_0_init_system_aperture_regs(adev);
	mmhub_v2_0_init_tlb_regs(adev);
	mmhub_v2_0_init_cache_regs(adev);

	mmhub_v2_0_enable_system_domain(adev);
	mmhub_v2_0_disable_identity_aperture(adev);
	mmhub_v2_0_setup_vmid_config(adev);
	mmhub_v2_0_program_invalidation(adev);

	return 0;
}

void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, i, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
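
/*
 * Cache the MMHUB register offsets that the common GMC code uses for
 * page table base updates, TLB invalidation and fault decoding on
 * this hub.
 */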
void mmhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
}

static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							 bool enable)
{
	uint32_t def, data, def1, data1;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);

	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}

static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
}

int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		mmhub_v2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}