xref: /linux/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c (revision 00e08fb2e7ce88e2ae366cbc79997d71d014b0ac)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25 
26 #include <drm/drm_cache.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "gmc_v12_0.h"
31 #include "gmc_v12_1.h"
32 #include "athub/athub_4_1_0_sh_mask.h"
33 #include "athub/athub_4_1_0_offset.h"
34 #include "oss/osssys_7_0_0_offset.h"
35 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
36 #include "soc24_enum.h"
37 #include "soc24.h"
38 #include "soc15d.h"
39 #include "soc15_common.h"
40 #include "nbif_v6_3_1.h"
41 #include "gfxhub_v12_0.h"
42 #include "gfxhub_v12_1.h"
43 #include "mmhub_v4_1_0.h"
44 #include "mmhub_v4_2_0.h"
45 #include "athub_v4_1_0.h"
46 #include "umc_v8_14.h"
47 
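/*
 * No register programming is needed to change the ECC interrupt state;
 * this stub only satisfies the amdgpu_irq_src_funcs interface.
 */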
48 static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
49 					 struct amdgpu_irq_src *src,
50 					 unsigned type,
51 					 enum amdgpu_interrupt_state state)
52 {
53 	return 0;
54 }
55 
56 static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
57 					      struct amdgpu_irq_src *src, unsigned type,
58 					      enum amdgpu_interrupt_state state)
59 {
60 	switch (state) {
61 	case AMDGPU_IRQ_STATE_DISABLE:
62 		/* MM HUB */
63 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
64 		/* GFX HUB */
65 		/* This works because this interrupt is only
66 		 * enabled at init/resume and disabled in
67 		 * fini/suspend, so the overall state doesn't
68 		 * change over the course of suspend/resume.
69 		 */
70 		if (!adev->in_s0ix)
71 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
72 		break;
73 	case AMDGPU_IRQ_STATE_ENABLE:
74 		/* MM HUB */
75 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
76 		/* GFX HUB */
77 		/* This works because this interrupt is only
78 		 * enabled at init/resume and disabled in
79 		 * fini/suspend, so the overall state doesn't
80 		 * change over the course of suspend/resume.
81 		 */
82 		if (!adev->in_s0ix)
83 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
84 		break;
85 	default:
86 		break;
87 	}
88 
89 	return 0;
90 }
91 
92 static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
93 				       struct amdgpu_irq_src *source,
94 				       struct amdgpu_iv_entry *entry)
95 {
96 	struct amdgpu_vmhub *hub;
97 	bool retry_fault = !!(entry->src_data[1] &
98 			      AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
99 	bool write_fault = !!(entry->src_data[1] &
100 			      AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
101 	uint32_t status = 0;
102 	u64 addr;
103 
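	/*
	 * The faulting address is delivered split across the IV payload:
	 * src_data[0] carries the 4K page number (address bits 43:12) and
	 * the low nibble of src_data[1] carries address bits 47:44.
	 */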
104 	addr = (u64)entry->src_data[0] << 12;
105 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
106 
107 	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
108 		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
109 	else
110 		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
111 
112 	if (retry_fault) {
113 		int ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, 0, 0,
114 							write_fault);
115 		/* Returning 1 here also prevents sending the IV to the KFD */
116 		if (ret == 1)
117 			return 1;
118 	}
119 
120 	if (!amdgpu_sriov_vf(adev)) {
121 		/*
122 		 * Issue a dummy read to wait for the status register to
123 		 * be updated to avoid reading an incorrect value due to
124 		 * the new fast GRBM interface.
125 		 */
126 		if (entry->vmid_src == AMDGPU_GFXHUB(0))
127 			RREG32(hub->vm_l2_pro_fault_status);
128 
129 		status = RREG32(hub->vm_l2_pro_fault_status);
130 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
131 
132 		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
133 					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
134 	}
135 
136 	if (printk_ratelimit()) {
137 		struct amdgpu_task_info *task_info;
138 
139 		dev_err(adev->dev,
140 			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
141 			entry->vmid_src ? "mmhub" : "gfxhub",
142 			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
143 		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
144 		if (task_info) {
145 			amdgpu_vm_print_task_info(adev, task_info);
146 			amdgpu_vm_put_task_info(task_info);
147 		}
148 
149 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
150 				addr, entry->client_id);
151 
152 		/* Only print L2 fault status if the status register could be read and
153 		 * contains useful information
154 		 */
155 		if (status != 0)
156 			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
157 	}
158 
159 	return 0;
160 }
161 
162 static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
163 	.set = gmc_v12_0_vm_fault_interrupt_state,
164 	.process = gmc_v12_0_process_interrupt,
165 };
166 
167 static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
168 	.set = gmc_v12_0_ecc_interrupt_state,
169 	.process = amdgpu_umc_process_ecc_irq,
170 };
171 
172 static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
173 {
174 	adev->gmc.vm_fault.num_types = 1;
175 	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;
176 
177 	if (!amdgpu_sriov_vf(adev)) {
178 		adev->gmc.ecc_irq.num_types = 1;
179 		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
180 	}
181 }
182 
183 /**
184  * gmc_v12_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
185  *
186  * @adev: amdgpu_device pointer
187  * @vmhub: vmhub type
188  * Returns true if the invalidation semaphore should be used for @vmhub.
189  */
190 static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
191 				       uint32_t vmhub)
192 {
193 	return ((vmhub == AMDGPU_MMHUB0(0)) &&
194 		(!amdgpu_sriov_vf(adev)));
195 }
196 
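/*
 * Look up the PASID currently mapped to @vmid in the IH VMID LUT and
 * store it in @p_pasid.  Returns true if a non-zero mapping exists.
 */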
197 static bool gmc_v12_0_get_vmid_pasid_mapping_info(
198 					struct amdgpu_device *adev,
199 					uint8_t vmid, uint16_t *p_pasid)
200 {
201 	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
202 
203 	return !!(*p_pasid);
204 }
205 
206 /*
207  * GART
208  * VMID 0 is the physical GPU addresses as used by the kernel.
209  * VMIDs 1-15 are used for userspace clients and are handled
210  * by the amdgpu vm/hsa code.
211  */
212 
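/*
 * Flush the TLB for @vmid on @vmhub by driving the hub's invalidation
 * engine 17 directly over MMIO.  On the MMHUB (bare metal only) the
 * request is bracketed by the invalidation semaphore as a power-gating
 * workaround, and an additional private invalidation is issued through
 * the reserved CID2 register when it is present.
 */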
213 static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
214 				   unsigned int vmhub, uint32_t flush_type)
215 {
216 	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
217 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
218 	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
219 	u32 tmp;
220 	/* Use register 17 for GART */
221 	const unsigned eng = 17;
222 	unsigned int i;
223 	unsigned char hub_ip = 0;
224 
225 	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
226 		   GC_HWIP : MMHUB_HWIP;
227 
228 	spin_lock(&adev->gmc.invalidate_lock);
229 	/*
230 	 * The GPUVM invalidate acknowledge state may be lost across a
231 	 * power-gating off cycle. Acquire the semaphore before the
232 	 * invalidation and release it afterwards so that the power-gated
233 	 * state is not entered in between, as a workaround for the issue.
234 	 */
235 
236 	/* TODO: Semaphore usage for the GFXHUB still needs further debugging. */
237 	if (use_semaphore) {
238 		for (i = 0; i < adev->usec_timeout; i++) {
239 			/* a read return value of 1 means the semaphore was acquired */
240 			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
241 					    hub->eng_distance * eng, hub_ip);
242 			if (tmp & 0x1)
243 				break;
244 			udelay(1);
245 		}
246 
247 		if (i >= adev->usec_timeout)
248 			dev_err(adev->dev,
249 				"Timeout waiting for sem acquire in VM flush!\n");
250 	}
251 
252 	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
253 
254 	/* Wait for ACK with a delay. */
255 	for (i = 0; i < adev->usec_timeout; i++) {
256 		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
257 				    hub->eng_distance * eng, hub_ip);
258 		tmp &= 1 << vmid;
259 		if (tmp)
260 			break;
261 
262 		udelay(1);
263 	}
264 
265 	/* TODO: Semaphore usage for the GFXHUB still needs further debugging. */
266 	if (use_semaphore)
267 		/*
268 		 * Add the semaphore release after the invalidation;
269 		 * writing 0 releases the semaphore.
270 		 */
271 		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
272 			      hub->eng_distance * eng, 0, hub_ip);
273 
274 	/* Issue additional private vm invalidation to MMHUB */
275 	if ((vmhub != AMDGPU_GFXHUB(0)) &&
276 	    (hub->vm_l2_bank_select_reserved_cid2) &&
277 		!amdgpu_sriov_vf(adev)) {
278 		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
279 		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
280 		inv_req |= (1 << 25);
281 		/* Issue private invalidation */
282 		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
283 		/* Read back to ensure invalidation is done */
284 		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
285 	}
286 
287 	spin_unlock(&adev->gmc.invalidate_lock);
288 
289 	if (i < adev->usec_timeout)
290 		return;
291 
292 	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
293 }
294 
295 /**
296  * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
297  *
298  * @adev: amdgpu_device pointer
299  * @vmid: vm instance to flush
300  * @vmhub: which hub to flush
301  * @flush_type: the flush type
302  *
303  * Flush the TLB for the requested page table.
304  */
305 static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
306 					uint32_t vmhub, uint32_t flush_type)
307 {
308 	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
309 		return;
310 
311 	/* flush hdp cache */
312 	amdgpu_device_flush_hdp(adev, NULL);
313 
314 	/* This is necessary for SRIOV as well as for GFXOFF to function
315 	 * properly under bare metal
316 	 */
317 	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
318 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
319 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
320 		const unsigned eng = 17;
321 		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
322 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
323 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
324 
325 		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
326 				1 << vmid, GET_INST(GC, 0));
327 		return;
328 	}
329 
330 	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
331 	return;
332 }
333 
334 /**
335  * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
336  *
337  * @adev: amdgpu_device pointer
338  * @pasid: pasid to be flushed
339  * @flush_type: the flush type
340  * @all_hub: flush all hubs
341  * @inst: which instance of KIQ to use for the invalidation
342  *
343  * Flush the TLB for the requested pasid.
344  */
345 static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
346 					  uint16_t pasid, uint32_t flush_type,
347 					  bool all_hub, uint32_t inst)
348 {
349 	uint16_t queried;
350 	int vmid, i;
351 
352 	if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
353 	    (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
354 		struct mes_inv_tlbs_pasid_input input = {0};
355 		input.pasid = pasid;
356 		input.flush_type = flush_type;
357 		input.hub_id = AMDGPU_GFXHUB(0);
358 		/* MES will invalidate all gc_hub instances for the device from the master */
359 		adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
360 		if (all_hub) {
361 			/* Only need to invalidate the mm_hub now; gfx12 only supports one mmhub */
362 			input.hub_id = AMDGPU_MMHUB0(0);
363 			adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
364 		}
365 		return;
366 	}
367 
368 	for (vmid = 1; vmid < 16; vmid++) {
369 		bool valid;
370 
371 		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
372 							      &queried);
373 		if (!valid || queried != pasid)
374 			continue;
375 
376 		if (all_hub) {
377 			for_each_set_bit(i, adev->vmhubs_mask,
378 					 AMDGPU_MAX_VMHUBS)
379 				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
380 							flush_type);
381 		} else {
382 			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
383 						flush_type);
384 		}
385 	}
386 }
387 
388 static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
389 					     unsigned vmid, uint64_t pd_addr)
390 {
391 	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
392 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
393 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
394 	unsigned eng = ring->vm_inv_eng;
395 
396 	/*
397 	 * The GPUVM invalidate acknowledge state may be lost across a
398 	 * power-gating off cycle. Acquire the semaphore before the
399 	 * invalidation and release it afterwards so that the power-gated
400 	 * state is not entered in between, as a workaround for the issue.
401 	 */
402 
403 	/* TODO: Semaphore usage for the GFXHUB still needs further debugging. */
404 	if (use_semaphore)
405 		/* a read return value of 1 means the semaphore was acquired */
406 		amdgpu_ring_emit_reg_wait(ring,
407 					  hub->vm_inv_eng0_sem +
408 					  hub->eng_distance * eng, 0x1, 0x1);
409 
410 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
411 			      (hub->ctx_addr_distance * vmid),
412 			      lower_32_bits(pd_addr));
413 
414 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
415 			      (hub->ctx_addr_distance * vmid),
416 			      upper_32_bits(pd_addr));
417 
418 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
419 					    hub->eng_distance * eng,
420 					    hub->vm_inv_eng0_ack +
421 					    hub->eng_distance * eng,
422 					    req, 1 << vmid);
423 
424 	/* TODO: Semaphore usage for the GFXHUB still needs further debugging. */
425 	if (use_semaphore)
426 		/*
427 		 * Add the semaphore release after the invalidation;
428 		 * writing 0 releases the semaphore.
429 		 */
430 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
431 				      hub->eng_distance * eng, 0);
432 
433 	return pd_addr;
434 }
435 
436 static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
437 					 unsigned pasid)
438 {
439 	struct amdgpu_device *adev = ring->adev;
440 	uint32_t reg;
441 
442 	if (ring->vm_hub == AMDGPU_GFXHUB(0))
443 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
444 	else
445 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
446 
447 	amdgpu_ring_emit_wreg(ring, reg, pasid);
448 }
449 
450 /*
451  * PTE format:
452  * 63 P
453  * 62:59 reserved
454  * 58 D
455  * 57 G
456  * 56 T
457  * 55:54 M
458  * 53:52 SW
459  * 51:48 reserved for future
460  * 47:12 4k physical page base address
461  * 11:7 fragment
462  * 6 write
463  * 5 read
464  * 4 exe
465  * 3 Z
466  * 2 snooped
467  * 1 system
468  * 0 valid
469  *
470  * PDE format:
471  * 63 P
472  * 62:58 block fragment size
473  * 57 reserved
474  * 56 A
475  * 55:54 M
476  * 53:52 reserved
477  * 51:48 reserved for future
478  * 47:6 physical base address of PD or PTE
479  * 5:3 reserved
480  * 2 C
481  * 1 system
482  * 0 valid
483  */
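
/*
 * Illustrative example (not taken from hardware documentation): based on
 * the PTE layout above, a valid, snooped system page that is readable,
 * writable and executable sets bits 0 (valid), 1 (system), 2 (snooped),
 * 4 (exe), 5 (read) and 6 (write), i.e. a low byte of 0x77, with the
 * 4K-aligned physical page address in bits 47:12.
 */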
484 
485 static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
486 				 uint64_t *addr, uint64_t *flags)
487 {
488 	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
489 		*addr = adev->vm_manager.vram_base_offset + *addr -
490 			adev->gmc.vram_start;
491 	BUG_ON(*addr & 0xFFFF00000000003FULL);
492 
493 	if (!adev->gmc.translate_further)
494 		return;
495 
496 	if (level == AMDGPU_VM_PDB1) {
497 		/* Set the block fragment size */
498 		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
499 			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);
500 
501 	} else if (level == AMDGPU_VM_PDB0) {
502 		if (*flags & AMDGPU_PDE_PTE_GFX12)
503 			*flags &= ~AMDGPU_PDE_PTE_GFX12;
504 	}
505 }
506 
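/*
 * Translate the AMDGPU_VM_* mapping flags into GFX12 PTE bits
 * (executable, mtype, noalloc, PRT) and apply BO specific bits
 * such as DCC and the uncached mtype.
 */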
507 static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
508 				 struct amdgpu_vm *vm,
509 				 struct amdgpu_bo *bo,
510 				 uint32_t vm_flags,
511 				 uint64_t *flags)
512 {
513 	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
514 		*flags |= AMDGPU_PTE_EXECUTABLE;
515 	else
516 		*flags &= ~AMDGPU_PTE_EXECUTABLE;
517 
518 	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
519 	case AMDGPU_VM_MTYPE_DEFAULT:
520 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
521 		break;
522 	case AMDGPU_VM_MTYPE_NC:
523 	default:
524 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
525 		break;
526 	case AMDGPU_VM_MTYPE_UC:
527 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
528 		break;
529 	}
530 
531 	if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
532 		*flags |= AMDGPU_PTE_NOALLOC;
533 	else
534 		*flags &= ~AMDGPU_PTE_NOALLOC;
535 
536 	if (vm_flags & AMDGPU_VM_PAGE_PRT) {
537 		*flags |= AMDGPU_PTE_PRT_GFX12;
538 		*flags |= AMDGPU_PTE_SNOOPED;
539 		*flags |= AMDGPU_PTE_SYSTEM;
540 		*flags |= AMDGPU_PTE_IS_PTE;
541 		*flags &= ~AMDGPU_PTE_VALID;
542 	}
543 
544 	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
545 		*flags |= AMDGPU_PTE_DCC;
546 
547 	if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
548 		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
549 }
550 
551 static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
552 {
553 	return 0;
554 }
555 
556 static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
557 {
558 	unsigned int max_tex_channel_caches, alignment;
559 
560 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
561 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
562 		return 0;
563 
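	/*
	 * Illustrative example with a hypothetical configuration: for
	 * max_texture_channel_caches = 16 (a power of two) the alignment
	 * factor is 16 / 4 = 4, giving a DCC alignment of 4 * 16 * 1K = 64K.
	 */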
564 	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
565 	if (is_power_of_2(max_tex_channel_caches))
566 		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
567 	else
568 		alignment = roundup_pow_of_two(max_tex_channel_caches);
569 
570 	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
571 }
572 
573 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
574 	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
575 	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
576 	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
577 	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
578 	.get_vm_pde = gmc_v12_0_get_vm_pde,
579 	.get_vm_pte = gmc_v12_0_get_vm_pte,
580 	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
581 	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
582 };
583 
584 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
585 {
586 	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
587 }
588 
589 static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
590 {
591 	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
592 	case IP_VERSION(8, 14, 0):
593 		adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
594 		adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
595 		adev->umc.node_inst_num = 0;
596 		adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
597 		adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
598 		adev->umc.ras = &umc_v8_14_ras;
599 		break;
600 	default:
601 		break;
602 	}
603 }
604 
605 
606 static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
607 {
608 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
609 	case IP_VERSION(4, 1, 0):
610 		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
611 		break;
612 	case IP_VERSION(4, 2, 0):
613 		adev->mmhub.funcs = &mmhub_v4_2_0_funcs;
614 		break;
615 	default:
616 		break;
617 	}
618 }
619 
620 static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
621 {
622 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
623 	case IP_VERSION(12, 0, 0):
624 	case IP_VERSION(12, 0, 1):
625 		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
626 		break;
627 	case IP_VERSION(12, 1, 0):
628 		adev->gfxhub.funcs = &gfxhub_v12_1_funcs;
629 		break;
630 	default:
631 		break;
632 	}
633 }
634 
635 static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
636 {
637 	struct amdgpu_device *adev = ip_block->adev;
638 
639 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
640 	case IP_VERSION(12, 1, 0):
641 		gmc_v12_1_set_gmc_funcs(adev);
642 		adev->gmc.init_pte_flags = AMDGPU_PTE_IS_PTE;
643 		break;
644 	default:
645 		gmc_v12_0_set_gmc_funcs(adev);
646 		break;
647 	}
648 	gmc_v12_0_set_gfxhub_funcs(adev);
649 	gmc_v12_0_set_mmhub_funcs(adev);
650 	gmc_v12_0_set_irq_funcs(adev);
651 	gmc_v12_0_set_umc_funcs(adev);
652 
653 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
654 	adev->gmc.shared_aperture_end =
655 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
656 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
657 	adev->gmc.private_aperture_end =
658 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
659 	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
660 
661 	return 0;
662 }
663 
664 static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
665 {
666 	struct amdgpu_device *adev = ip_block->adev;
667 	int r;
668 
669 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
670 	if (r)
671 		return r;
672 
673 	r = amdgpu_gmc_ras_late_init(adev);
674 	if (r)
675 		return r;
676 
677 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
678 }
679 
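/*
 * Place the VRAM, GART and (optionally) AGP apertures in the GPU's
 * physical address space, using the MMHUB framebuffer location as the
 * VRAM base, and record the VRAM base offset used when building page
 * table entries.
 */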
680 static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
681 					struct amdgpu_gmc *mc)
682 {
683 	u64 base = 0;
684 
685 	base = adev->mmhub.funcs->get_fb_location(adev);
686 
687 	amdgpu_gmc_set_agp_default(adev, mc);
688 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
689 	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
690 	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
691 		amdgpu_gmc_agp_location(adev, mc);
692 
693 	/* base offset of vram pages */
694 	if (amdgpu_sriov_vf(adev))
695 		adev->vm_manager.vram_base_offset = 0;
696 	else
697 		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
698 }
699 
700 /**
701  * gmc_v12_0_mc_init - initialize the memory controller driver params
702  *
703  * @adev: amdgpu_device pointer
704  *
705  * Look up the amount of vram, vram width, and decide how to place
706  * vram and gart within the GPU's physical address space.
707  * Returns 0 for success.
708  */
709 static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
710 {
711 	int r;
712 
713 	/* get_memsize() returns the VRAM size in MB */
714 	adev->gmc.mc_vram_size =
715 		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
716 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
717 
718 	if (!(adev->flags & AMD_IS_APU)) {
719 		r = amdgpu_device_resize_fb_bar(adev);
720 		if (r)
721 			return r;
722 	}
723 
724 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
725 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
726 
727 #ifdef CONFIG_X86_64
728 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
729 		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
730 		adev->gmc.aper_size = adev->gmc.real_vram_size;
731 	}
732 #endif
733 	/* In case the PCI BAR is larger than the actual amount of vram */
734 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
735 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
736 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
737 
738 	/* set the gart size */
739 	if (amdgpu_gart_size == -1) {
740 		adev->gmc.gart_size = 512ULL << 20;
741 	} else
742 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
743 
744 	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);
745 
746 	return 0;
747 }
748 
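/*
 * Allocate and initialize the GART table in VRAM: 8 bytes per GPU page,
 * with uncached, executable leaf PTEs as the default GART PTE flags.
 */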
749 static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
750 {
751 	int r;
752 
753 	if (adev->gart.bo) {
754 		WARN(1, "PCIE GART already initialized\n");
755 		return 0;
756 	}
757 
758 	/* Initialize common gart structure */
759 	r = amdgpu_gart_init(adev);
760 	if (r)
761 		return r;
762 
763 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
764 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
765 				    AMDGPU_PTE_EXECUTABLE |
766 				    AMDGPU_PTE_IS_PTE;
767 
768 	return amdgpu_gart_table_vram_alloc(adev);
769 }
770 
771 static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
772 {
773 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
774 	struct amdgpu_device *adev = ip_block->adev;
775 	int i;
776 
777 	adev->mmhub.funcs->init(adev);
778 
779 	adev->gfxhub.funcs->init(adev);
780 
781 	spin_lock_init(&adev->gmc.invalidate_lock);
782 
783 	r = amdgpu_atomfirmware_get_vram_info(adev,
784 					      &vram_width, &vram_type, &vram_vendor);
785 	adev->gmc.vram_width = vram_width;
786 
787 	adev->gmc.vram_type = vram_type;
788 	adev->gmc.vram_vendor = vram_vendor;
789 
790 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
791 	case IP_VERSION(12, 0, 0):
792 	case IP_VERSION(12, 0, 1):
793 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
794 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
795 		/*
796 		 * To support 4-level page tables, the VM size is 256TB
797 		 * (48 bit), the maximum size, with a block size of 512
798 		 * (9 bit)
799 		 */
800 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
801 		break;
802 	case IP_VERSION(12, 1, 0):
803 		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
804 				NUM_XCC(adev->gfx.xcc_mask));
805 		for (i = 0; i < hweight32(adev->aid_mask); i++)
806 			set_bit(AMDGPU_MMHUB0(i), adev->vmhubs_mask);
807 		/*
808 		 * To support 4-level page tables, the VM size is 256TB
809 		 * (48 bit), the maximum size, with a block size of 512
810 		 * (9 bit)
811 		 */
812 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
813 		break;
814 	default:
815 		break;
816 	}
817 
818 	/* This is the VMC page fault interrupt. */
819 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
820 			      VMC_1_0__SRCID__VM_FAULT,
821 			      &adev->gmc.vm_fault);
822 
823 	if (r)
824 		return r;
825 
826 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
827 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_UTCL2,
828 				      UTCL2_1_0__SRCID__FAULT,
829 				      &adev->gmc.vm_fault);
830 		if (r)
831 			return r;
832 		/* Add GCVM UTCL2 Retry fault */
833 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_UTCL2,
834 				      UTCL2_1_0__SRCID__RETRY,
835 				      &adev->gmc.vm_fault);
836 		if (r)
837 			return r;
838 
839 		/* Add MMVM UTCL2 Retry fault */
840 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
841 				      VMC_1_0__SRCID__VM_RETRY,
842 				      &adev->gmc.vm_fault);
843 	} else {
844 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
845 				      UTCL2_1_0__SRCID__FAULT,
846 				      &adev->gmc.vm_fault);
847 	}
848 	if (r)
849 		return r;
850 
851 	if (!amdgpu_sriov_vf(adev)) {
852 		/* interrupt sent to DF. */
853 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
854 				      &adev->gmc.ecc_irq);
855 		if (r)
856 			return r;
857 	}
858 
859 	/*
860 	 * Set the internal MC address mask. This is the max address of the GPU's
861 	 * internal address space.
862 	 */
863 	adev->gmc.mc_mask = AMDGPU_GMC_HOLE_MASK;
864 
865 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
866 	if (r) {
867 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
868 		return r;
869 	}
870 
871 	adev->need_swiotlb = drm_need_swiotlb(44);
872 
873 	r = gmc_v12_0_mc_init(adev);
874 	if (r)
875 		return r;
876 
877 	amdgpu_gmc_get_vbios_allocations(adev);
878 
879 	/* Memory manager */
880 	r = amdgpu_bo_init(adev);
881 	if (r)
882 		return r;
883 
884 	r = gmc_v12_0_gart_init(adev);
885 	if (r)
886 		return r;
887 
888 	/*
889 	 * number of VMs
890 	 * VMID 0 is reserved for System
891 	 * by default amdgpu graphics/compute uses VMIDs 1-7 and amdkfd VMIDs 8-15;
892 	 * the first KFD VMID is 3 on GC 12.1.0 and 1 when kernel queues are disabled
893 	 */
894 	adev->vm_manager.first_kfd_vmid =
895 		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0) ?
896 		3 : 8;
897 	adev->vm_manager.first_kfd_vmid =
898 		adev->gfx.disable_kq ? 1 : (adev->vm_manager.first_kfd_vmid);
899 
900 	amdgpu_vm_manager_init(adev);
901 
902 	r = amdgpu_gmc_ras_sw_init(adev);
903 	if (r)
904 		return r;
905 
906 	return 0;
907 }
908 
909 /**
910  * gmc_v12_0_gart_fini - vm fini callback
911  *
912  * @adev: amdgpu_device pointer
913  *
914  * Tears down the driver GART/VM setup.
915  */
916 static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
917 {
918 	amdgpu_gart_table_vram_free(adev);
919 }
920 
921 static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
922 {
923 	struct amdgpu_device *adev = ip_block->adev;
924 
925 	amdgpu_vm_manager_fini(adev);
926 	gmc_v12_0_gart_fini(adev);
927 	amdgpu_gem_force_release(adev);
928 	amdgpu_bo_fini(adev);
929 
930 	return 0;
931 }
932 
933 static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
934 {
935 }
936 
937 /**
938  * gmc_v12_0_gart_enable - gart enable
939  *
940  * @adev: amdgpu_device pointer
941  */
942 static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
943 {
944 	int r;
945 	bool value;
946 
947 	if (adev->gart.bo == NULL) {
948 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
949 		return -EINVAL;
950 	}
951 
952 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
953 
954 	r = adev->mmhub.funcs->gart_enable(adev);
955 	if (r)
956 		return r;
957 
958 	/* Flush HDP after it is initialized */
959 	amdgpu_device_flush_hdp(adev, NULL);
960 
961 	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
962 
963 	adev->mmhub.funcs->set_fault_enable_default(adev, value);
964 	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
965 
966 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
967 		 (unsigned)(adev->gmc.gart_size >> 20),
968 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
969 
970 	return 0;
971 }
972 
973 static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
974 {
975 	int r;
976 	struct amdgpu_device *adev = ip_block->adev;
977 
978 	/* The sequence of these two function calls matters. */
979 	gmc_v12_0_init_golden_registers(adev);
980 
981 	r = gmc_v12_0_gart_enable(adev);
982 	if (r)
983 		return r;
984 
985 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
986 		adev->umc.funcs->init_registers(adev);
987 
988 	return 0;
989 }
990 
991 /**
992  * gmc_v12_0_gart_disable - gart disable
993  *
994  * @adev: amdgpu_device pointer
995  *
996  * This disables all VM page tables.
997  */
998 static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
999 {
1000 	adev->mmhub.funcs->gart_disable(adev);
1001 }
1002 
1003 static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
1004 {
1005 	struct amdgpu_device *adev = ip_block->adev;
1006 
1007 	if (amdgpu_sriov_vf(adev)) {
1008 		/* full access mode, so don't touch any GMC register */
1009 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1010 		return 0;
1011 	}
1012 
1013 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1014 
1015 	if (adev->gmc.ecc_irq.funcs &&
1016 		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
1017 		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1018 
1019 	gmc_v12_0_gart_disable(adev);
1020 
1021 	return 0;
1022 }
1023 
1024 static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
1025 {
1026 	gmc_v12_0_hw_fini(ip_block);
1027 
1028 	return 0;
1029 }
1030 
1031 static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
1032 {
1033 	int r;
1034 
1035 	r = gmc_v12_0_hw_init(ip_block);
1036 	if (r)
1037 		return r;
1038 
1039 	amdgpu_vmid_reset_all(ip_block->adev);
1040 
1041 	return 0;
1042 }
1043 
1044 static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
1045 {
1046 	/* MC is always ready in GMC v12. */
1047 	return true;
1048 }
1049 
1050 static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1051 {
1052 	/* There is no need to wait for MC idle in GMC v12. */
1053 	return 0;
1054 }
1055 
1056 static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1057 					   enum amd_clockgating_state state)
1058 {
1059 	int r;
1060 	struct amdgpu_device *adev = ip_block->adev;
1061 
1062 	r = adev->mmhub.funcs->set_clockgating(adev, state);
1063 	if (r)
1064 		return r;
1065 
1066 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 1, 0))
1067 		return athub_v4_1_0_set_clockgating(adev, state);
1068 	else
1069 		return 0;
1070 }
1071 
1072 static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
1073 {
1074 	struct amdgpu_device *adev = ip_block->adev;
1075 
1076 	adev->mmhub.funcs->get_clockgating(adev, flags);
1077 
1078 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 1, 0))
1079 		athub_v4_1_0_get_clockgating(adev, flags);
1080 }
1081 
1082 static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
1083 					   enum amd_powergating_state state)
1084 {
1085 	return 0;
1086 }
1087 
1088 const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
1089 	.name = "gmc_v12_0",
1090 	.early_init = gmc_v12_0_early_init,
1091 	.sw_init = gmc_v12_0_sw_init,
1092 	.hw_init = gmc_v12_0_hw_init,
1093 	.late_init = gmc_v12_0_late_init,
1094 	.sw_fini = gmc_v12_0_sw_fini,
1095 	.hw_fini = gmc_v12_0_hw_fini,
1096 	.suspend = gmc_v12_0_suspend,
1097 	.resume = gmc_v12_0_resume,
1098 	.is_idle = gmc_v12_0_is_idle,
1099 	.wait_for_idle = gmc_v12_0_wait_for_idle,
1100 	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
1101 	.set_powergating_state = gmc_v12_0_set_powergating_state,
1102 	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
1103 };
1104 
1105 const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
1106 	.type = AMD_IP_BLOCK_TYPE_GMC,
1107 	.major = 12,
1108 	.minor = 0,
1109 	.rev = 0,
1110 	.funcs = &gmc_v12_0_ip_funcs,
1111 };
1112