/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v2_3.h"

#include "mmhub/mmhub_2_3_0_offset.h"
#include "mmhub/mmhub_2_3_0_sh_mask.h"
#include "mmhub/mmhub_2_3_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

static const char *mmhub_client_ids_vangogh[][2] = {
	[0][0] = "MP0",
	[1][0] = "MP1",
	[2][0] = "DCEDMC",
	[3][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[26][0] = "OSS",
	[27][0] = "HDP",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP0",
	[1][1] = "MP1",
	[2][1] = "DCEDMC",
	[3][1] = "DCEVGA",
	[4][1] = "DCEDWB",
	[5][1] = "XDP",
	[26][1] = "OSS",
	[27][1] = "HDP",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static uint32_t mmhub_v2_3_get_invalidate_req(unsigned int vmid,
					      uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

static void
mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
					    uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vangogh) ?
			    mmhub_client_ids_vangogh[cid][rw] : NULL;
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

/* Program the page table base address for the given VMID's context. */
static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev,
					uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
}

/* Point VM context 0 at the GART page table and program its address range. */
static void mmhub_v2_3_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_3_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Disable AGP. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);

	tmp = mmMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
}

/* Enable the system domain (VM context 0), programmed with page table depth 0. */
static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
/* Disable the context 1 identity aperture by programming an empty range. */
static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

/* Set up VM contexts 1 and up, which back the per-process page tables. */
static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

/* Open the address range of every invalidation engine to the full range. */
static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned int i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MMMC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they
		 * are VF copy registers, so the VBIOS post does not program
		 * them. For SR-IOV the driver needs to program them itself.
		 */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v2_3_init_gart_aperture_regs(adev);
	mmhub_v2_3_init_system_aperture_regs(adev);
	mmhub_v2_3_init_tlb_regs(adev);
	mmhub_v2_3_init_cache_regs(adev);

	mmhub_v2_3_enable_system_domain(adev);
	mmhub_v2_3_disable_identity_aperture(adev);
	mmhub_v2_3_setup_vmid_config(adev);
	mmhub_v2_3_program_invalidation(adev);

	return 0;
}

static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_3_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev,
						bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v2_3_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v2_3_get_invalidate_req,
};

static void mmhub_v2_3_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &mmhub_v2_3_vmhub_funcs;
}

static void
mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t def, data, def1, data1;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

	} else {
		data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}

static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t def, data, def1, data1, def2, data2;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
	def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
		data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
		data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
	} else {
		data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
		data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
		data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
	if (def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}

static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	mmhub_v2_3_update_medium_grain_clock_gating(adev,
						    state == AMD_CG_STATE_GATE);
	mmhub_v2_3_update_medium_grain_light_sleep(adev,
						   state == AMD_CG_STATE_GATE);

	return 0;
}

static void
mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1, data2, data3;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
	data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
	data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
	    && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
		*flags |= AMD_CG_SUPPORT_MC_MGCG;
	}

	/* AMD_CG_SUPPORT_MC_LS */
	if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
	    && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
	    && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
	.init = mmhub_v2_3_init,
	.gart_enable = mmhub_v2_3_gart_enable,
	.set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
	.gart_disable = mmhub_v2_3_gart_disable,
	.set_clockgating = mmhub_v2_3_set_clockgating,
	.get_clockgating = mmhub_v2_3_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_3_setup_vm_pt_regs,
};