/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

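/**
 * mmhub_v1_0_get_fb_location - query the framebuffer base seen by the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Reads MC_VM_FB_LOCATION_BASE and converts the FB_BASE field, which is
 * stored in 16MB units, into a byte address.
 */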
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

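/**
 * mmhub_v1_0_gart_enable - program the MMHUB for GART translation
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the system aperture, the L1 TLB and VM L2 cache controls,
 * points VM context0 at the GART page table and enables VM contexts
 * 1-15 with protection faults on by default.
 */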
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	u32 i;

	/* Program the MC: update the system aperture configuration. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
				(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
				(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they
		 * are VF copy registers that the vbios post does not program;
		 * under SRIOV the driver has to program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);

	/* setup context0 */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /* valid bit */

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)((u64)adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Disable identity aperture. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0xFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

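	/*
	 * Program VM contexts 1-15. The per-context CNTL registers are
	 * contiguous, so "+ i" selects context 1 + i; the start/end
	 * addresses are 64-bit register pairs, hence the "+ i * 2".
	 */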
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL)
				+ i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				amdgpu_vm_block_size - 9);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i * 2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i * 2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i * 2,
			lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i * 2,
			upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	return 0;
}

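/**
 * mmhub_v1_0_gart_disable - disable GART translation through the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, the L1 TLB and the VM L2 cache.
 */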
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

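/*
 * Build a VM_INVALIDATE_ENG0_REQ value that performs a legacy (flush type 0)
 * invalidation of all L1 and L2 PTE/PDE cache entries for the given VMID.
 */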
static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

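/*
 * Return the VM_CONTEXT*_CNTL interrupt-enable bits for the protection
 * fault types the driver wants to be notified about.
 */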
static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
{
	return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
}

static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}

static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}

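/*
 * Fill in the MMHUB entry of adev->vmhub with the register offsets and
 * callbacks this hub provides, so that hub-independent code can drive
 * either hub through the common amdgpu_vmhub structure.
 */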
static int mmhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
	hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;

	return 0;
}

static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

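/*
 * Program all 18 invalidation engines with an address range that covers
 * the whole address space.
 */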
static int mmhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}

static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}

static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}

static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

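/*
 * Toggle medium grain clock gating (MGCG): the master enable lives in
 * ATC_L2_MISC_CG, while the DAGB0/DAGB1 MISC2 bits disable gating on the
 * individual request/return paths whenever MGCG is off.
 */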
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2, data2;

	def  = data  = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
	def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);

	if (def1 != data1)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);

	if (def2 != data2)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
}

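/* Toggle the ATHUB medium grain clock gating master enable. */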
static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

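/* Toggle memory light sleep (LS) for the ATC L2 cache. */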
static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
}

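/* Toggle ATHUB memory light sleep; this additionally requires HDP LS. */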
static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						  bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

static int mmhub_v1_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool gate = state == AMD_CG_STATE_GATE;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		mmhub_v1_0_update_medium_grain_clock_gating(adev, gate);
		athub_update_medium_grain_clock_gating(adev, gate);
		mmhub_v1_0_update_medium_grain_light_sleep(adev, gate);
		athub_update_medium_grain_light_sleep(adev, gate);
		break;
	default:
		break;
	}

	return 0;
}

static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static int mmhub_v1_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
	.name = "mmhub_v1_0",
	.early_init = mmhub_v1_0_early_init,
	.late_init = mmhub_v1_0_late_init,
	.sw_init = mmhub_v1_0_sw_init,
	.sw_fini = mmhub_v1_0_sw_fini,
	.hw_init = mmhub_v1_0_hw_init,
	.hw_fini = mmhub_v1_0_hw_fini,
	.suspend = mmhub_v1_0_suspend,
	.resume = mmhub_v1_0_resume,
	.is_idle = mmhub_v1_0_is_idle,
	.wait_for_idle = mmhub_v1_0_wait_for_idle,
	.soft_reset = mmhub_v1_0_soft_reset,
	.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
	.set_powergating_state = mmhub_v1_0_set_powergating_state,
	.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version mmhub_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MMHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &mmhub_v1_0_ip_funcs,
};