xref: /linux/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25 
26 #include <drm/drm_cache.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "gmc_v12_0.h"
31 #include "athub/athub_4_1_0_sh_mask.h"
32 #include "athub/athub_4_1_0_offset.h"
33 #include "oss/osssys_7_0_0_offset.h"
34 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
35 #include "soc24_enum.h"
36 #include "soc24.h"
37 #include "soc15d.h"
38 #include "soc15_common.h"
39 #include "nbif_v6_3_1.h"
40 #include "gfxhub_v12_0.h"
41 #include "mmhub_v4_1_0.h"
42 #include "athub_v4_1_0.h"
43 #include "umc_v8_14.h"
44 
45 static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
46 					 struct amdgpu_irq_src *src,
47 					 unsigned type,
48 					 enum amdgpu_interrupt_state state)
49 {
50 	return 0;
51 }
52 
53 static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
54 					      struct amdgpu_irq_src *src, unsigned type,
55 					      enum amdgpu_interrupt_state state)
56 {
57 	switch (state) {
58 	case AMDGPU_IRQ_STATE_DISABLE:
59 		/* MM HUB */
60 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
61 		/* GFX HUB */
62 		/* This works because this interrupt is only
63 		 * enabled at init/resume and disabled in
64 		 * fini/suspend, so the overall state doesn't
65 		 * change over the course of suspend/resume.
66 		 */
67 		if (!adev->in_s0ix)
68 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
69 		break;
70 	case AMDGPU_IRQ_STATE_ENABLE:
71 		/* MM HUB */
72 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
73 		/* GFX HUB */
74 		/* This works because this interrupt is only
75 		 * enabled at init/resume and disabled in
76 		 * fini/suspend, so the overall state doesn't
77 		 * change over the course of suspend/resume.
78 		 */
79 		if (!adev->in_s0ix)
80 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
81 		break;
82 	default:
83 		break;
84 	}
85 
86 	return 0;
87 }
88 
89 static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
90 				       struct amdgpu_irq_src *source,
91 				       struct amdgpu_iv_entry *entry)
92 {
93 	struct amdgpu_vmhub *hub;
94 	bool retry_fault = !!(entry->src_data[1] &
95 			      AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
96 	bool write_fault = !!(entry->src_data[1] &
97 			      AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
98 	uint32_t status = 0;
99 	u64 addr;
100 
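	/* Reassemble the faulting page address: src_data[0] carries bits 43:12
	 * and the low nibble of src_data[1] carries bits 47:44.
	 */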
101 	addr = (u64)entry->src_data[0] << 12;
102 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
103 
104 	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
105 		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
106 	else
107 		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
108 
109 	if (retry_fault) {
110 		/* Returning 1 here also prevents sending the IV to the KFD */
111 
112 		/* Process it only if it's the first fault for this address */
113 		if (entry->ih != &adev->irq.ih_soft &&
114 		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
115 					     entry->timestamp))
116 			return 1;
117 
118 		/* Delegate it to a different ring if the hardware hasn't
119 		 * already done it.
120 		 */
121 		if (entry->ih == &adev->irq.ih) {
122 			amdgpu_irq_delegate(adev, entry, 8);
123 			return 1;
124 		}
125 
126 		/* Try to handle the recoverable page faults by filling page
127 		 * tables
128 		 */
129 		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
130 					   entry->timestamp, write_fault))
131 			return 1;
132 	}
133 
134 	if (!amdgpu_sriov_vf(adev)) {
135 		/*
136 		 * Issue a dummy read to wait for the status register to
137 		 * be updated to avoid reading an incorrect value due to
138 		 * the new fast GRBM interface.
139 		 */
140 		if (entry->vmid_src == AMDGPU_GFXHUB(0))
141 			RREG32(hub->vm_l2_pro_fault_status);
142 
143 		status = RREG32(hub->vm_l2_pro_fault_status);
144 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
145 
146 		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
147 					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
148 	}
149 
150 	if (printk_ratelimit()) {
151 		struct amdgpu_task_info *task_info;
152 
153 		dev_err(adev->dev,
154 			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
155 			entry->vmid_src ? "mmhub" : "gfxhub",
156 			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
157 		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
158 		if (task_info) {
159 			amdgpu_vm_print_task_info(adev, task_info);
160 			amdgpu_vm_put_task_info(task_info);
161 		}
162 
163 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
164 				addr, entry->client_id);
165 
166 		/* Only print L2 fault status if the status register could be read and
167 		 * contains useful information
168 		 */
169 		if (status != 0)
170 			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
171 	}
172 
173 	return 0;
174 }
175 
176 static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
177 	.set = gmc_v12_0_vm_fault_interrupt_state,
178 	.process = gmc_v12_0_process_interrupt,
179 };
180 
181 static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
182 	.set = gmc_v12_0_ecc_interrupt_state,
183 	.process = amdgpu_umc_process_ecc_irq,
184 };
185 
186 static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
187 {
188 	adev->gmc.vm_fault.num_types = 1;
189 	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;
190 
191 	if (!amdgpu_sriov_vf(adev)) {
192 		adev->gmc.ecc_irq.num_types = 1;
193 		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
194 	}
195 }
196 
197 /**
198  * gmc_v12_0_use_invalidate_semaphore - determine whether to use the invalidation semaphore
199  *
200  * @adev: amdgpu_device pointer
201  * @vmhub: vmhub type
202  *
203  */
204 static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
205 				       uint32_t vmhub)
206 {
207 	return ((vmhub == AMDGPU_MMHUB0(0)) &&
208 		(!amdgpu_sriov_vf(adev)));
209 }
210 
211 static bool gmc_v12_0_get_vmid_pasid_mapping_info(
212 					struct amdgpu_device *adev,
213 					uint8_t vmid, uint16_t *p_pasid)
214 {
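	/* The IH block keeps a VMID-to-PASID LUT; a value of 0 means no valid mapping */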
215 	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
216 
217 	return !!(*p_pasid);
218 }
219 
220 /*
221  * GART
222  * VMID 0 maps the physical GPU addresses used by the kernel.
223  * VMIDs 1-15 are used for userspace clients and are handled
224  * by the amdgpu vm/hsa code.
225  */
226 
227 static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
228 				   unsigned int vmhub, uint32_t flush_type)
229 {
230 	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
231 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
232 	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
233 	u32 tmp;
234 	/* Use register 17 for GART */
235 	const unsigned eng = 17;
236 	unsigned int i;
237 	unsigned char hub_ip = 0;
238 
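	/* Select the register space (GC or MMHUB) used by the RLC-aware accessors */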
239 	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
240 		   GC_HWIP : MMHUB_HWIP;
241 
242 	spin_lock(&adev->gmc.invalidate_lock);
243 	/*
244 	 * The GPUVM invalidate acknowledge state may be lost across a
245 	 * power-gating off cycle.  Acquire the semaphore before the
246 	 * invalidation and release it after the invalidation to avoid
247 	 * entering the power-gated state and to work around the issue.
248 	 */
249 
250 	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
251 	if (use_semaphore) {
252 		for (i = 0; i < adev->usec_timeout; i++) {
253 			/* a read return value of 1 means the semaphore was acquired */
254 			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
255 					    hub->eng_distance * eng, hub_ip);
256 			if (tmp & 0x1)
257 				break;
258 			udelay(1);
259 		}
260 
261 		if (i >= adev->usec_timeout)
262 			dev_err(adev->dev,
263 				"Timeout waiting for sem acquire in VM flush!\n");
264 	}
265 
266 	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
267 
268 	/* Wait for ACK with a delay. */
269 	for (i = 0; i < adev->usec_timeout; i++) {
270 		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
271 				    hub->eng_distance * eng, hub_ip);
272 		tmp &= 1 << vmid;
273 		if (tmp)
274 			break;
275 
276 		udelay(1);
277 	}
278 
279 	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
280 	if (use_semaphore)
281 		/*
282 		 * add semaphore release after invalidation,
283 		 * write with 0 means semaphore release
284 		 */
285 		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
286 			      hub->eng_distance * eng, 0, hub_ip);
287 
288 	/* Issue additional private vm invalidation to MMHUB */
289 	if ((vmhub != AMDGPU_GFXHUB(0)) &&
290 	    (hub->vm_l2_bank_select_reserved_cid2) &&
291 		!amdgpu_sriov_vf(adev)) {
292 		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
293 		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
294 		inv_req |= (1 << 25);
295 		/* Issue private invalidation */
296 		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
297 		/* Read back to ensure the invalidation is done */
298 		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
299 	}
300 
301 	spin_unlock(&adev->gmc.invalidate_lock);
302 
303 	if (i < adev->usec_timeout)
304 		return;
305 
306 	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
307 }
308 
309 /**
310  * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
311  *
312  * @adev: amdgpu_device pointer
313  * @vmid: vm instance to flush
314  * @vmhub: which hub to flush
315  * @flush_type: the flush type
316  *
317  * Flush the TLB for the requested page table.
318  */
319 static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
320 					uint32_t vmhub, uint32_t flush_type)
321 {
322 	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
323 		return;
324 
325 	/* flush hdp cache */
326 	amdgpu_device_flush_hdp(adev, NULL);
327 
328 	/* This is necessary for SRIOV as well as for GFXOFF to function
329 	 * properly under bare metal
330 	 */
331 	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
332 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
333 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
334 		const unsigned eng = 17;
335 		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
336 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
337 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
338 
339 		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
340 				1 << vmid, GET_INST(GC, 0));
341 		return;
342 	}
343 
344 	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
345 	return;
346 }
347 
348 /**
349  * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
350  *
351  * @adev: amdgpu_device pointer
352  * @pasid: pasid to be flushed
353  * @flush_type: the flush type
354  * @all_hub: flush all hubs
355  * @inst: which KIQ instance to use for the invalidation
356  *
357  * Flush the TLB for the requested pasid.
358  */
359 static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
360 					  uint16_t pasid, uint32_t flush_type,
361 					  bool all_hub, uint32_t inst)
362 {
363 	uint16_t queried;
364 	int vmid, i;
365 
366 	if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
367 	    (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
368 		struct mes_inv_tlbs_pasid_input input = {0};
369 		input.pasid = pasid;
370 		input.flush_type = flush_type;
371 		input.hub_id = AMDGPU_GFXHUB(0);
372 		/* MES will invalidate all GC hubs for the device from the master */
373 		adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
374 		if (all_hub) {
375 			/* Only the mmhub needs invalidating here; gfx12 supports only one mmhub */
376 			input.hub_id = AMDGPU_MMHUB0(0);
377 			adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
378 		}
379 		return;
380 	}
381 
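	/* Otherwise walk the VMID-PASID LUT and flush every VMID mapped to this pasid */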
382 	for (vmid = 1; vmid < 16; vmid++) {
383 		bool valid;
384 
385 		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
386 							      &queried);
387 		if (!valid || queried != pasid)
388 			continue;
389 
390 		if (all_hub) {
391 			for_each_set_bit(i, adev->vmhubs_mask,
392 					 AMDGPU_MAX_VMHUBS)
393 				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
394 							flush_type);
395 		} else {
396 			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
397 						flush_type);
398 		}
399 	}
400 }
401 
402 static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
403 					     unsigned vmid, uint64_t pd_addr)
404 {
405 	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
406 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
407 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
408 	unsigned eng = ring->vm_inv_eng;
409 
410 	/*
411 	 * The GPUVM invalidate acknowledge state may be lost across a
412 	 * power-gating off cycle.  Acquire the semaphore before the
413 	 * invalidation and release it after the invalidation to avoid
414 	 * entering the power-gated state and to work around the issue.
415 	 */
416 
417 	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
418 	if (use_semaphore)
419 		/* a read return value of 1 means the semaphore was acquired */
420 		amdgpu_ring_emit_reg_wait(ring,
421 					  hub->vm_inv_eng0_sem +
422 					  hub->eng_distance * eng, 0x1, 0x1);
423 
424 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
425 			      (hub->ctx_addr_distance * vmid),
426 			      lower_32_bits(pd_addr));
427 
428 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
429 			      (hub->ctx_addr_distance * vmid),
430 			      upper_32_bits(pd_addr));
431 
432 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
433 					    hub->eng_distance * eng,
434 					    hub->vm_inv_eng0_ack +
435 					    hub->eng_distance * eng,
436 					    req, 1 << vmid);
437 
438 	/* TODO: Semaphore use for GFXHUB still needs further debugging. */
439 	if (use_semaphore)
440 		/*
441 		 * add semaphore release after invalidation,
442 		 * write with 0 means semaphore release
443 		 */
444 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
445 				      hub->eng_distance * eng, 0);
446 
447 	return pd_addr;
448 }
449 
450 static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
451 					 unsigned pasid)
452 {
453 	struct amdgpu_device *adev = ring->adev;
454 	uint32_t reg;
455 
456 	if (ring->vm_hub == AMDGPU_GFXHUB(0))
457 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
458 	else
459 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
460 
461 	amdgpu_ring_emit_wreg(ring, reg, pasid);
462 }
463 
464 /*
465  * PTE format:
466  * 63 P
467  * 62:59 reserved
468  * 58 D
469  * 57 G
470  * 56 T
471  * 55:54 M
472  * 53:52 SW
473  * 51:48 reserved for future
474  * 47:12 4k physical page base address
475  * 11:7 fragment
476  * 6 write
477  * 5 read
478  * 4 exe
479  * 3 Z
480  * 2 snooped
481  * 1 system
482  * 0 valid
483  *
484  * PDE format:
485  * 63 P
486  * 62:58 block fragment size
487  * 57 reserved
488  * 56 A
489  * 55:54 M
490  * 53:52 reserved
491  * 51:48 reserved for future
492  * 47:6 physical base address of PD or PTE
493  * 5:3 reserved
494  * 2 C
495  * 1 system
496  * 0 valid
497  */
498 
499 static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
500 				 uint64_t *addr, uint64_t *flags)
501 {
502 	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
503 		*addr = adev->vm_manager.vram_base_offset + *addr -
504 			adev->gmc.vram_start;
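	/* The address written into the PDE must be 64-byte aligned and fit in 48 bits */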
505 	BUG_ON(*addr & 0xFFFF00000000003FULL);
506 
507 	if (!adev->gmc.translate_further)
508 		return;
509 
510 	if (level == AMDGPU_VM_PDB1) {
511 		/* Set the block fragment size */
512 		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
513 			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);
514 
515 	} else if (level == AMDGPU_VM_PDB0) {
516 		if (*flags & AMDGPU_PDE_PTE_GFX12)
517 			*flags &= ~AMDGPU_PDE_PTE_GFX12;
518 	}
519 }
520 
521 static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
522 				 struct amdgpu_vm *vm,
523 				 struct amdgpu_bo *bo,
524 				 uint32_t vm_flags,
525 				 uint64_t *flags)
526 {
527 	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
528 		*flags |= AMDGPU_PTE_EXECUTABLE;
529 	else
530 		*flags &= ~AMDGPU_PTE_EXECUTABLE;
531 
532 	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
533 	case AMDGPU_VM_MTYPE_DEFAULT:
534 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
535 		break;
536 	case AMDGPU_VM_MTYPE_NC:
537 	default:
538 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
539 		break;
540 	case AMDGPU_VM_MTYPE_UC:
541 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
542 		break;
543 	}
544 
545 	if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
546 		*flags |= AMDGPU_PTE_NOALLOC;
547 	else
548 		*flags &= ~AMDGPU_PTE_NOALLOC;
549 
550 	if (vm_flags & AMDGPU_VM_PAGE_PRT) {
551 		*flags |= AMDGPU_PTE_PRT_GFX12;
552 		*flags |= AMDGPU_PTE_SNOOPED;
553 		*flags |= AMDGPU_PTE_SYSTEM;
554 		*flags |= AMDGPU_PTE_IS_PTE;
555 		*flags &= ~AMDGPU_PTE_VALID;
556 	}
557 
558 	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
559 		*flags |= AMDGPU_PTE_DCC;
560 
561 	if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
562 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
563 }
564 
565 static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
566 {
567 	return 0;
568 }
569 
570 static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
571 {
572 	unsigned int max_tex_channel_caches, alignment;
573 
574 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
575 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
576 		return 0;
577 
578 	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
579 	if (is_power_of_2(max_tex_channel_caches))
580 		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
581 	else
582 		alignment = roundup_pow_of_two(max_tex_channel_caches);
583 
584 	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
585 }
586 
587 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
588 	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
589 	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
590 	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
591 	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
592 	.get_vm_pde = gmc_v12_0_get_vm_pde,
593 	.get_vm_pte = gmc_v12_0_get_vm_pte,
594 	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
595 	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
596 };
597 
598 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
599 {
600 	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
601 }
602 
603 static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
604 {
605 	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
606 	case IP_VERSION(8, 14, 0):
607 		adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
608 		adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
609 		adev->umc.node_inst_num = 0;
610 		adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
611 		adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
612 		adev->umc.ras = &umc_v8_14_ras;
613 		break;
614 	default:
615 		break;
616 	}
617 }
618 
619 
620 static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
621 {
622 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
623 	case IP_VERSION(4, 1, 0):
624 		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
625 		break;
626 	default:
627 		break;
628 	}
629 }
630 
631 static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
632 {
633 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
634 	case IP_VERSION(12, 0, 0):
635 	case IP_VERSION(12, 0, 1):
636 		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
637 		break;
638 	default:
639 		break;
640 	}
641 }
642 
643 static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
644 {
645 	struct amdgpu_device *adev = ip_block->adev;
646 
647 	gmc_v12_0_set_gfxhub_funcs(adev);
648 	gmc_v12_0_set_mmhub_funcs(adev);
649 	gmc_v12_0_set_gmc_funcs(adev);
650 	gmc_v12_0_set_irq_funcs(adev);
651 	gmc_v12_0_set_umc_funcs(adev);
652 
653 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
654 	adev->gmc.shared_aperture_end =
655 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
656 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
657 	adev->gmc.private_aperture_end =
658 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
659 
660 	return 0;
661 }
662 
663 static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
664 {
665 	struct amdgpu_device *adev = ip_block->adev;
666 	int r;
667 
668 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
669 	if (r)
670 		return r;
671 
672 	r = amdgpu_gmc_ras_late_init(adev);
673 	if (r)
674 		return r;
675 
676 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
677 }
678 
679 static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
680 					struct amdgpu_gmc *mc)
681 {
682 	u64 base = 0;
683 
684 	base = adev->mmhub.funcs->get_fb_location(adev);
685 
686 	amdgpu_gmc_set_agp_default(adev, mc);
687 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
688 	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
689 	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
690 		amdgpu_gmc_agp_location(adev, mc);
691 
692 	/* base offset of vram pages */
693 	if (amdgpu_sriov_vf(adev))
694 		adev->vm_manager.vram_base_offset = 0;
695 	else
696 		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
697 }
698 
699 /**
700  * gmc_v12_0_mc_init - initialize the memory controller driver params
701  *
702  * @adev: amdgpu_device pointer
703  *
704  * Look up the amount of vram, vram width, and decide how to place
705  * vram and gart within the GPU's physical address space.
706  * Returns 0 for success.
707  */
708 static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
709 {
710 	int r;
711 
712 	/* get_memsize() reports the VRAM size in MB */
713 	adev->gmc.mc_vram_size =
714 		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
715 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
716 
717 	if (!(adev->flags & AMD_IS_APU)) {
718 		r = amdgpu_device_resize_fb_bar(adev);
719 		if (r)
720 			return r;
721 	}
722 
723 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
724 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
725 
726 #ifdef CONFIG_X86_64
727 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
728 		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
729 		adev->gmc.aper_size = adev->gmc.real_vram_size;
730 	}
731 #endif
732 	/* In case the PCI BAR is larger than the actual amount of vram */
733 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
734 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
735 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
736 
737 	/* Set the GART size; default to 512 MB when amdgpu_gart_size is auto (-1) */
738 	if (amdgpu_gart_size == -1)
739 		adev->gmc.gart_size = 512ULL << 20;
740 	else
741 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
742 
743 	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);
744 
745 	return 0;
746 }
747 
748 static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
749 {
750 	int r;
751 
752 	if (adev->gart.bo) {
753 		WARN(1, "PCIE GART already initialized\n");
754 		return 0;
755 	}
756 
757 	/* Initialize common gart structure */
758 	r = amdgpu_gart_init(adev);
759 	if (r)
760 		return r;
761 
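	/* Each GART page table entry is 8 bytes */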
762 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
763 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
764 				    AMDGPU_PTE_EXECUTABLE |
765 				    AMDGPU_PTE_IS_PTE;
766 
767 	return amdgpu_gart_table_vram_alloc(adev);
768 }
769 
770 static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
771 {
772 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
773 	struct amdgpu_device *adev = ip_block->adev;
774 
775 	adev->mmhub.funcs->init(adev);
776 
777 	adev->gfxhub.funcs->init(adev);
778 
779 	spin_lock_init(&adev->gmc.invalidate_lock);
780 
781 	r = amdgpu_atomfirmware_get_vram_info(adev,
782 					      &vram_width, &vram_type, &vram_vendor);
783 	adev->gmc.vram_width = vram_width;
784 
785 	adev->gmc.vram_type = vram_type;
786 	adev->gmc.vram_vendor = vram_vendor;
787 
788 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
789 	case IP_VERSION(12, 0, 0):
790 	case IP_VERSION(12, 0, 1):
791 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
792 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
793 		/*
794 		 * To support 4-level page tables, use the maximum
795 		 * VM size of 256TB (48 bits) with a block size
796 		 * of 512 (9 bits).
797 		 */
798 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
799 		break;
800 	default:
801 		break;
802 	}
803 
804 	/* This is the VMC page fault interrupt. */
805 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
806 			      VMC_1_0__SRCID__VM_FAULT,
807 			      &adev->gmc.vm_fault);
808 
809 	if (r)
810 		return r;
811 
812 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
813 			      UTCL2_1_0__SRCID__FAULT,
814 			      &adev->gmc.vm_fault);
815 	if (r)
816 		return r;
817 
818 	if (!amdgpu_sriov_vf(adev)) {
819 		/* interrupt sent to DF. */
820 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
821 				      &adev->gmc.ecc_irq);
822 		if (r)
823 			return r;
824 	}
825 
826 	/*
827 	 * Set the internal MC address mask.  This is the max address of the GPU's
828 	 * internal address space.
829 	 */
830 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
831 
832 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
833 	if (r) {
834 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
835 		return r;
836 	}
837 
838 	adev->need_swiotlb = drm_need_swiotlb(44);
839 
840 	r = gmc_v12_0_mc_init(adev);
841 	if (r)
842 		return r;
843 
844 	amdgpu_gmc_get_vbios_allocations(adev);
845 
846 	/* Memory manager */
847 	r = amdgpu_bo_init(adev);
848 	if (r)
849 		return r;
850 
851 	r = gmc_v12_0_gart_init(adev);
852 	if (r)
853 		return r;
854 
855 	/*
856 	 * number of VMs
857 	 * VMID 0 is reserved for System
858 	 * amdgpu graphics/compute will use VMIDs 1-7
859 	 * amdkfd will use VMIDs 8-15
860 	 */
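	/* With kernel queues disabled, VMIDs 1-15 are all handed to the KFD */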
861 	adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
862 
863 	amdgpu_vm_manager_init(adev);
864 
865 	r = amdgpu_gmc_ras_sw_init(adev);
866 	if (r)
867 		return r;
868 
869 	return 0;
870 }
871 
872 /**
873  * gmc_v12_0_gart_fini - vm fini callback
874  *
875  * @adev: amdgpu_device pointer
876  *
877  * Tears down the driver GART/VM setup.
878  */
879 static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
880 {
881 	amdgpu_gart_table_vram_free(adev);
882 }
883 
884 static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
885 {
886 	struct amdgpu_device *adev = ip_block->adev;
887 
888 	amdgpu_vm_manager_fini(adev);
889 	gmc_v12_0_gart_fini(adev);
890 	amdgpu_gem_force_release(adev);
891 	amdgpu_bo_fini(adev);
892 
893 	return 0;
894 }
895 
896 static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
897 {
898 }
899 
900 /**
901  * gmc_v12_0_gart_enable - gart enable
902  *
903  * @adev: amdgpu_device pointer
904  */
905 static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
906 {
907 	int r;
908 	bool value;
909 
910 	if (!adev->gart.bo) {
911 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
912 		return -EINVAL;
913 	}
914 
915 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
916 
917 	r = adev->mmhub.funcs->gart_enable(adev);
918 	if (r)
919 		return r;
920 
921 	/* Flush HDP after it is initialized */
922 	amdgpu_device_flush_hdp(adev, NULL);
923 
924 	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
925 
926 	adev->mmhub.funcs->set_fault_enable_default(adev, value);
927 	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
928 
929 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
930 		 (unsigned)(adev->gmc.gart_size >> 20),
931 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
932 
933 	return 0;
934 }
935 
936 static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
937 {
938 	int r;
939 	struct amdgpu_device *adev = ip_block->adev;
940 
941 	/* The order of these two calls matters. */
942 	gmc_v12_0_init_golden_registers(adev);
943 
944 	r = gmc_v12_0_gart_enable(adev);
945 	if (r)
946 		return r;
947 
948 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
949 		adev->umc.funcs->init_registers(adev);
950 
951 	return 0;
952 }
953 
954 /**
955  * gmc_v12_0_gart_disable - gart disable
956  *
957  * @adev: amdgpu_device pointer
958  *
959  * This disables all VM page tables.
960  */
961 static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
962 {
963 	adev->mmhub.funcs->gart_disable(adev);
964 }
965 
966 static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
967 {
968 	struct amdgpu_device *adev = ip_block->adev;
969 
970 	if (amdgpu_sriov_vf(adev)) {
971 		/* full access mode, so don't touch any GMC register */
972 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
973 		return 0;
974 	}
975 
976 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
977 
978 	if (adev->gmc.ecc_irq.funcs &&
979 		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
980 		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
981 
982 	gmc_v12_0_gart_disable(adev);
983 
984 	return 0;
985 }
986 
987 static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
988 {
989 	gmc_v12_0_hw_fini(ip_block);
990 
991 	return 0;
992 }
993 
994 static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
995 {
996 	int r;
997 
998 	r = gmc_v12_0_hw_init(ip_block);
999 	if (r)
1000 		return r;
1001 
1002 	amdgpu_vmid_reset_all(ip_block->adev);
1003 
1004 	return 0;
1005 }
1006 
1007 static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
1008 {
1009 	/* The MC is always ready in GMC v12. */
1010 	return true;
1011 }
1012 
1013 static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1014 {
1015 	/* There is no need to wait for MC idle in GMC v12. */
1016 	return 0;
1017 }
1018 
1019 static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1020 					   enum amd_clockgating_state state)
1021 {
1022 	int r;
1023 	struct amdgpu_device *adev = ip_block->adev;
1024 
1025 	r = adev->mmhub.funcs->set_clockgating(adev, state);
1026 	if (r)
1027 		return r;
1028 
1029 	return athub_v4_1_0_set_clockgating(adev, state);
1030 }
1031 
1032 static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
1033 {
1034 	struct amdgpu_device *adev = ip_block->adev;
1035 
1036 	adev->mmhub.funcs->get_clockgating(adev, flags);
1037 
1038 	athub_v4_1_0_get_clockgating(adev, flags);
1039 }
1040 
1041 static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
1042 					   enum amd_powergating_state state)
1043 {
1044 	return 0;
1045 }
1046 
1047 const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
1048 	.name = "gmc_v12_0",
1049 	.early_init = gmc_v12_0_early_init,
1050 	.sw_init = gmc_v12_0_sw_init,
1051 	.hw_init = gmc_v12_0_hw_init,
1052 	.late_init = gmc_v12_0_late_init,
1053 	.sw_fini = gmc_v12_0_sw_fini,
1054 	.hw_fini = gmc_v12_0_hw_fini,
1055 	.suspend = gmc_v12_0_suspend,
1056 	.resume = gmc_v12_0_resume,
1057 	.is_idle = gmc_v12_0_is_idle,
1058 	.wait_for_idle = gmc_v12_0_wait_for_idle,
1059 	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
1060 	.set_powergating_state = gmc_v12_0_set_powergating_state,
1061 	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
1062 };
1063 
1064 const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
1065 	.type = AMD_IP_BLOCK_TYPE_GMC,
1066 	.major = 12,
1067 	.minor = 0,
1068 	.rev = 0,
1069 	.funcs = &gmc_v12_0_ip_funcs,
1070 };
1071