/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v12_0.h"
#include "athub/athub_4_1_0_sh_mask.h"
#include "athub/athub_4_1_0_offset.h"
#include "oss/osssys_7_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "soc24_enum.h"
#include "soc24.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbif_v6_3_1.h"
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"

static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src, unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	uint32_t status = 0;
	u64 addr;

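	/*
	 * Reconstruct the faulting page address from the IV ring entry:
	 * src_data[0] carries bits 43:12 of the address and the low
	 * nibble of src_data[1] carries bits 47:44.
	 */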
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	else
		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			dev_err(adev->dev,
				" in process %s pid %d thread %s pid %d\n",
				task_info->process_name, task_info->tgid,
				task_info->task_name, task_info->pid);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
	.set = gmc_v12_0_vm_fault_interrupt_state,
	.process = gmc_v12_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
	.set = gmc_v12_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
	}
}

/**
 * gmc_v12_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the invalidation semaphore should be used for this vmhub.
 */
static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v12_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
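	/* The IH VMID LUT holds the 16-bit pasid mapped to each vmid */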
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 covers the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles. To work around this, add a semaphore
	 * acquire before the invalidation and a semaphore release after
	 * it, so we don't enter a power-gated state mid-flush.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			dev_err(adev->dev,
				"Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for the ACK, polling with a delay; each vmid has its own ack bit. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add a semaphore release after the invalidation;
		 * writing 0 releases the semaphore.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	/* Issue an additional private VM invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: selects which KIQ instance to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles. To work around this, add a semaphore
	 * acquire before the invalidation and a semaphore release after
	 * it, so we don't enter a power-gated state mid-flush.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add a semaphore release after the invalidation;
		 * writing 0 releases the semaphore.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63 P
 * 62:59 reserved
 * 58 D
 * 57 G
 * 56 T
 * 55:54 M
 * 53:52 SW
 * 51:48 reserved for future
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63 P
 * 62:58 block fragment size
 * 57 reserved
 * 56 A
 * 55:54 M
 * 53:52 reserved
 * 51:48 reserved for future
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
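
/*
 * Illustrative sketch only (not used by the driver): assembling a 4K PTE
 * by hand according to the layout above. The real code builds PTEs via
 * the AMDGPU_PTE_* helpers.
 *
 *	u64 pte = 0;
 *	pte |= paddr & 0x0000FFFFFFFFF000ULL;	// bits 47:12, 4k page base
 *	pte |= 1ULL << 0;			// valid
 *	pte |= 1ULL << 1;			// system (page in system memory)
 *	pte |= 1ULL << 2;			// snooped
 *	pte |= 1ULL << 5;			// read
 *	pte |= 1ULL << 6;			// write
 */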

static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	}
}

static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE_GFX12)
			*flags &= ~AMDGPU_PDE_PTE_GFX12;
	}
}

static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
	struct amdgpu_device *bo_adev;
	bool coherent, is_system;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
		*flags |= AMDGPU_PTE_PRT_GFX12;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags |= AMDGPU_PTE_IS_PTE;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (!bo)
		return;

	if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			 AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);

	bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
	is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
		(bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);

	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
		*flags |= AMDGPU_PTE_DCC;

	/* WA for a HW bug: force MTYPE_NC for system memory and for
	 * coherent mappings of another device's memory.
	 */
	if (is_system || ((bo_adev != adev) && coherent))
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
}

static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}

static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
{
	unsigned int max_tex_channel_caches, alignment;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
		return 0;

	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
	if (is_power_of_2(max_tex_channel_caches))
		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
	else
		alignment = roundup_pow_of_two(max_tex_channel_caches);

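	/*
	 * Example: with 16 texture channel caches (a power of two),
	 * alignment = 16 / 4 = 4, so the returned DCC alignment is
	 * 4 * 16 * 1K = 64K.
	 */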
	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
}

static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
	.map_mtype = gmc_v12_0_map_mtype,
	.get_vm_pde = gmc_v12_0_get_vm_pde,
	.get_vm_pte = gmc_v12_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
};

static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
}

static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
}

static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v12_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v12_0_set_gfxhub_funcs(adev);
	gmc_v12_0_set_mmhub_funcs(adev);
	gmc_v12_0_set_gmc_funcs(adev);
	gmc_v12_0_set_irq_funcs(adev);
	gmc_v12_0_set_umc_funcs(adev);

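	/* the shared and private apertures each span 4 GiB */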
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v12_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v12_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}

	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size; default to 512 MiB */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

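	/* each GART page table entry is 8 bytes */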
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE |
				    AMDGPU_PTE_IS_PTE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v12_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* ECC interrupt is sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v12_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v12_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v12_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v12_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v12_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v12_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v12_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v12_0_init_golden_registers(adev);

	r = gmc_v12_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v12_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v12_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v12_0_gart_disable(adev);

	return 0;
}

static int gmc_v12_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v12_0_hw_fini(adev);

	return 0;
}

static int gmc_v12_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v12_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v12_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v12. */
	return true;
}

static int gmc_v12_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v12. */
	return 0;
}

static int gmc_v12_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v12_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v4_1_0_set_clockgating(adev, state);
}

static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v4_1_0_get_clockgating(adev, flags);
}

static int gmc_v12_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
	.name = "gmc_v12_0",
	.early_init = gmc_v12_0_early_init,
	.sw_init = gmc_v12_0_sw_init,
	.hw_init = gmc_v12_0_hw_init,
	.late_init = gmc_v12_0_late_init,
	.sw_fini = gmc_v12_0_sw_fini,
	.hw_fini = gmc_v12_0_hw_fini,
	.suspend = gmc_v12_0_suspend,
	.resume = gmc_v12_0_resume,
	.is_idle = gmc_v12_0_is_idle,
	.wait_for_idle = gmc_v12_0_wait_for_idle,
	.soft_reset = gmc_v12_0_soft_reset,
	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
	.set_powergating_state = gmc_v12_0_set_powergating_state,
	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v12_0_ip_funcs,
};