/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v2_3.h"

#include "mmhub/mmhub_2_3_0_offset.h"
#include "mmhub/mmhub_2_3_0_sh_mask.h"
#include "mmhub/mmhub_2_3_0_default.h"
#include "navi10_enum.h"

#include "soc15_common.h"

static const char *mmhub_client_ids_vangogh[][2] = {
	[0][0] = "MP0",
	[1][0] = "MP1",
	[2][0] = "DCEDMC",
	[3][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[26][0] = "OSS",
	[27][0] = "HDP",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP0",
	[1][1] = "MP1",
	[2][1] = "DCEDMC",
	[3][1] = "DCEVGA",
	[4][1] = "DCEDWB",
	[5][1] = "XDP",
	[26][1] = "OSS",
	[27][1] = "HDP",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

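/*
 * Build the MMVM_INVALIDATE_ENG*_REQ value used to flush the MMHUB VM
 * caches (L1/L2 PTEs and PDEs) for a single VMID using legacy invalidation.
 */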
static uint32_t mmhub_v2_3_get_invalidate_req(unsigned int vmid,
					      uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

static void
mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
					    uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	mmhub_cid = amdgpu_mmhub_client_name(&adev->mmhub, cid, rw);
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

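/* Program the page table base address of the given VMID on MMHUB 0. */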
static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev,
					uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
}

static void mmhub_v2_3_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v2_3_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Disable AGP. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address. */
	value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);

	tmp = mmMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);

	tmp = mmMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp);

	tmp = mmMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
}

static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}

static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

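/*
 * Configure VM contexts 1-15: enable them, set page table depth and block
 * size, and enable the per-context protection fault defaults.
 */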
static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned int i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

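/*
 * Bring up the MMHUB GART: program the GART and system apertures, TLB and
 * L2 cache, then enable the system domain and the invalidation engines.
 */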
static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
		 * are VF copy registers and the VBIOS post doesn't program
		 * them, so for SRIOV the driver needs to program them.
		 */
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v2_3_init_gart_aperture_regs(adev);
	mmhub_v2_3_init_system_aperture_regs(adev);
	mmhub_v2_3_init_tlb_regs(adev);
	mmhub_v2_3_init_cache_regs(adev);

	mmhub_v2_3_enable_system_domain(adev);
	mmhub_v2_3_disable_identity_aperture(adev);
	mmhub_v2_3_setup_vmid_config(adev);
	mmhub_v2_3_program_invalidation(adev);

	return 0;
}

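/*
 * Tear down the MMHUB GART: disable all VM contexts, the L1 TLB and the
 * L2 cache.
 */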
static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < AMDGPU_NUM_VMID; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v2_3_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev,
						bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v2_3_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v2_3_get_invalidate_req,
};

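/*
 * Fill in the MMHUB register offsets and strides used by the common GMC
 * code and register the client ID table used for fault decoding.
 */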
static void mmhub_v2_3_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ -
		mmMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vmhub_funcs = &mmhub_v2_3_vmhub_funcs;

	amdgpu_mmhub_init_client_info(&adev->mmhub,
				      mmhub_client_ids_vangogh,
				      ARRAY_SIZE(mmhub_client_ids_vangogh));
}

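/*
 * Enable or disable MMHUB medium grain clock gating via the ATC L2 and
 * DAGB0 clock gating override bits.
 */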
static void
mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t def, data, def1, data1;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

	} else {
		data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}

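/*
 * Enable or disable MMHUB memory light sleep via the ATC L2 and DAGB0
 * light sleep override bits.
 */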
static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t def, data, def1, data1, def2, data2;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
	def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
		data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
		data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
	} else {
		data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
		data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
		data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
	if (def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}

static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	mmhub_v2_3_update_medium_grain_clock_gating(adev,
						    state == AMD_CG_STATE_GATE);
	mmhub_v2_3_update_medium_grain_light_sleep(adev,
						   state == AMD_CG_STATE_GATE);

	return 0;
}

static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data, data1, data2, data3;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
	data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
	data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
	data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
	    && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
		*flags |= AMD_CG_SUPPORT_MC_MGCG;
	}

	/* AMD_CG_SUPPORT_MC_LS */
	if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
	    && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
	    && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			  DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
	.init = mmhub_v2_3_init,
	.gart_enable = mmhub_v2_3_gart_enable,
	.set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
	.gart_disable = mmhub_v2_3_gart_disable,
	.set_clockgating = mmhub_v2_3_set_clockgating,
	.get_clockgating = mmhub_v2_3_get_clockgating,
	.setup_vm_pt_regs = mmhub_v2_3_setup_vm_pt_regs,
};