xref: /linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision fdcf62fbfb288f4cb050c02c5ab9bc58fc53a872)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

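/*
 * Note: each entry below is an {offset, AND mask, OR value} triplet, in the
 * form consumed by amdgpu_device_program_register_sequence().
 */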
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

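/*
 * The three ECC register tables below follow the same pattern: one base
 * offset per UMC instance, plus a 0x800 stride for each channel instance
 * within that UMC (compare the UMC_V6_1_* instance/channel counts used in
 * gmc_v9_0_set_umc_funcs() below).
 */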
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 have these programming
	 * sequences performed by the PSP BL.
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, CID));
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
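
/*
 * The request word built above is written to a hub's per-engine
 * VM_INVALIDATE_ENG*_REQ register, and the matching *_ACK register is then
 * polled for the per-VMID bit; see gmc_v9_0_flush_gpu_tlb() below.
 */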

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the semaphore workaround is needed when invalidating
 * TLBs on @vmhub.
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(adev->asic_type == CHIP_RAVEN &&
		   adev->rev_id < 0x8 &&
		   adev->pdev->device == 0x15d8)));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
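
/*
 * gmc_v9_0_sw_init() below further splits the userspace range: amdgpu
 * graphics/compute uses VMIDs 1-7 and amdkfd uses VMIDs 8-15 on each hub.
 */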

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
				1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. Acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: still needs debugging to use the semaphore for GFXHUB as well */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if (vmhub == AMDGPU_GFXHUB_0)
			RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: still needs debugging to use the semaphore for GFXHUB as well */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (adev->in_gpu_reset)
		return -EIO;

	if (ring->sched.ready) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. Acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: still needs debugging to use the semaphore for GFXHUB as well */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: still needs debugging to use the semaphore for GFXHUB as well */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

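/*
 * Illustrative sketch only (not part of the driver): extracting fields
 * from a raw VEGA10 PTE. The masks and shifts follow the bit layout in
 * the preceding comment; the helper names are hypothetical.
 */
static inline u64 gmc_v9_0_example_pte_page_base(u64 pte)
{
	/* bits 47:12 hold the 4K-aligned physical page base address */
	return pte & 0x0000FFFFFFFFF000ULL;
}

static inline unsigned int gmc_v9_0_example_pte_fragment(u64 pte)
{
	/* bits 11:7 hold the fragment size field */
	return (pte >> 7) & 0x1f;
}

static inline bool gmc_v9_0_example_pte_valid(u64 pte)
{
	/* bit 0 is the valid bit */
	return pte & 0x1;
}
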
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}
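
/*
 * Callers reach the hook above through the amdgpu_gmc_map_mtype() wrapper
 * when translating AMDGPU_VM_MTYPE_* mapping flags into VEGA10 PTE MTYPE
 * bits.
 */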

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overwrites the GART, which by default is placed in the
	 * first 8M, and causes VM_FAULTS once the GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check the related code in gmc_v9_0_get_vbios_fb_size and
	 * gmc_v9_0_late_init.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
		r = amdgpu_atomfirmware_mem_ecc_supported(adev);
		if (!r) {
			DRM_INFO("ECC is not present.\n");
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else
			DRM_INFO("ECC is active.\n");

		r = amdgpu_atomfirmware_sram_ecc_supported(adev);
		if (!r)
			DRM_INFO("SRAM ECC is not present.\n");
		else
			DRM_INFO("SRAM ECC is active.\n");
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
		adev->mmhub.funcs->reset_ras_error_count(adev);

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* memory size reported by the NBIO block is in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture*/
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		gfxhub_v1_0_set_fault_enable_default(adev, value);
		if (adev->asic_type == CHIP_ARCTURUS)
			mmhub_v9_4_set_fault_enable_default(adev, value);
		else
			mmhub_v1_0_set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_fini(adev);
	if (r)
		return r;

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_restore_registers(adev);
	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};