/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v12_0.h"
#include "athub/athub_4_1_0_sh_mask.h"
#include "athub/athub_4_1_0_offset.h"
#include "oss/osssys_7_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "soc24_enum.h"
#include "soc24.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbif_v6_3_1.h"
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"
#include "umc_v8_14.h"

static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src, unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	uint32_t status = 0;
	u64 addr;

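	/* The IV entry carries the faulting page, not a byte address:
	 * src_data[0] holds VA bits 43:12 and the low nibble of
	 * src_data[1] holds bits 47:44, yielding a 4K-aligned 48-bit
	 * virtual address.
	 */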
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	else
		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			dev_err(adev->dev,
				" in process %s pid %d thread %s pid %d\n",
				task_info->process_name, task_info->tgid,
				task_info->task_name, task_info->pid);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
				addr, entry->client_id);

		/* Only print the L2 fault status if the status register could
		 * be read and contains useful information.
		 */
		if (status != 0)
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
	.set = gmc_v12_0_vm_fault_interrupt_state,
	.process = gmc_v12_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
	.set = gmc_v12_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
	}
}

/**
 * gmc_v12_0_use_invalidate_semaphore - determine whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true when the semaphore workaround applies, i.e. for the
 * MMHUB on bare metal.
 */
static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v12_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
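	/* Each VMID has a LUT entry maintained by the IH block; a non-zero
	 * value is the PASID currently bound to that VMID.
	 */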
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
		   GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles. Add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state in between, as a workaround for the issue.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			dev_err(adev->dev,
				"Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for the ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore path still needs debugging before it can be used for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add a semaphore release after the invalidation;
		 * writing 0 releases the semaphore.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
		!amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
				1 << vmid, GET_INST(GC, 0));
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

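	/* A PASID is not flushed directly in hardware; instead, scan the
	 * per-VMID PASID LUT and flush every user VMID (1..15) that is
	 * currently bound to the requested PASID.
	 */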
	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles. Add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state in between, as a workaround for the issue.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore path still needs debugging before it can be used for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Add a semaphore release after the invalidation;
		 * writing 0 releases the semaphore.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63 P
 * 62:59 reserved
 * 58 D
 * 57 G
 * 56 T
 * 55:54 M
 * 53:52 SW
 * 51:48 reserved for future
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63 P
 * 62:58 block fragment size
 * 57 reserved
 * 56 A
 * 55:54 M
 * 53:52 reserved
 * 51:48 reserved for future
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
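
/*
 * Illustrative example of the PTE layout above: a valid, readable,
 * writable and executable 4K page at physical address 0x100000 with
 * MTYPE 0 encodes as
 *   0x100000 | BIT(6) | BIT(5) | BIT(4) | BIT(0) = 0x100071
 */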

static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	}
}

static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
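	/* PD/PT base addresses must be 64-byte aligned and fit in the
	 * 48-bit MC address space, i.e. bits 63:48 and 5:0 must be clear.
	 */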
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE_GFX12)
			*flags &= ~AMDGPU_PDE_PTE_GFX12;
	}
}

static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
		*flags |= AMDGPU_PTE_PRT_GFX12;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags |= AMDGPU_PTE_IS_PTE;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
		*flags |= AMDGPU_PTE_DCC;

	if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}

static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
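	/* No VBIOS framebuffer carve-out is reserved here; report size 0. */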
	return 0;
}

static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
{
	unsigned int max_tex_channel_caches, alignment;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
		return 0;

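	/* The DCC alignment scales with the number of texture channel
	 * caches; the value returned below is alignment * caches * 1 KiB,
	 * in bytes.
	 */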
	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
	if (is_power_of_2(max_tex_channel_caches))
		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
	else
		alignment = roundup_pow_of_two(max_tex_channel_caches);

	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
}

static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
	.map_mtype = gmc_v12_0_map_mtype,
	.get_vm_pde = gmc_v12_0_get_vm_pde,
	.get_vm_pte = gmc_v12_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
};

static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
}

static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 14, 0):
		adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
		adev->umc.node_inst_num = 0;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
		adev->umc.ras = &umc_v8_14_ras;
		break;
	default:
		break;
	}
}

static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v12_0_set_gfxhub_funcs(adev);
	gmc_v12_0_set_mmhub_funcs(adev);
	gmc_v12_0_set_gmc_funcs(adev);
	gmc_v12_0_set_irq_funcs(adev);
	gmc_v12_0_set_umc_funcs(adev);

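	/* The shared and private apertures are each 4 GiB windows:
	 * (4ULL << 30) bytes.
	 */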
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v12_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}

	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size: default to 512 MiB unless overridden */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize the common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

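	/* Each GART page-table entry is 8 bytes. */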
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE |
				    AMDGPU_PTE_IS_PTE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To support 4-level page tables, use the maximum
		 * vm size of 256TB (48 bits) and a block size of
		 * 512 (9 bits).
		 */
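		/* A 48-bit VA decomposes into 4 levels of 9 bits each
		 * plus a 12-bit page offset (4 * 9 + 12 = 48).
		 */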
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v12_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v12_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * Number of VMs:
	 * VMID 0 is reserved for the system;
	 * amdgpu graphics/compute will use VMIDs 1-7;
	 * amdkfd will use VMIDs 8-15.
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v12_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v12_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v12_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	/* The sequence of these two function calls matters. */
	gmc_v12_0_init_golden_registers(adev);

	r = gmc_v12_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v12_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v12_0_gart_disable(adev);

	return 0;
}

static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v12_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v12_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* The MC is always ready in GMC v12. */
	return true;
}

static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v12. */
	return 0;
}

static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v4_1_0_set_clockgating(adev, state);
}

static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v4_1_0_get_clockgating(adev, flags);
}

static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
	.name = "gmc_v12_0",
	.early_init = gmc_v12_0_early_init,
	.sw_init = gmc_v12_0_sw_init,
	.hw_init = gmc_v12_0_hw_init,
	.late_init = gmc_v12_0_late_init,
	.sw_fini = gmc_v12_0_sw_fini,
	.hw_fini = gmc_v12_0_hw_fini,
	.suspend = gmc_v12_0_suspend,
	.resume = gmc_v12_0_resume,
	.is_idle = gmc_v12_0_is_idle,
	.wait_for_idle = gmc_v12_0_wait_for_idle,
	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
	.set_powergating_state = gmc_v12_0_set_powergating_state,
	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v12_0_ip_funcs,
};