/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

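/* The ECC interrupt needs no per-state register programming, so this callback is a no-op. */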
static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_task_info *task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
					   entry->timestamp, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
		    (amdgpu_ip_version(adev, GC_HWIP, 0) <
		     IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		dev_err(adev->dev,
			" in process %s pid %d thread %s pid %d\n",
			task_info->process_name, task_info->tgid,
			task_info->task_name, task_info->pid);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	/* Only print L2 fault status if the status register could be read and
	 * contains useful information
	 */
	if (status != 0)
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - check whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip = 0;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	amdgpu_device_flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: The semaphore path still needs to be debugged for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: The semaphore path still needs to be debugged for GFXHUB as well. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n",
			vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		bool valid;

		valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								  &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v10_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

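/**
 * gmc_v10_0_emit_flush_gpu_tlb - emit a VM TLB flush on a ring
 *
 * @ring: amdgpu ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: page directory base address for @vmid
 *
 * Emit the writes that update the page table base address for @vmid and
 * trigger an invalidation on the ring's VM hub, then return @pd_addr.
 */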
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state, as a workaround for the issue.
	 */

	/* TODO: The semaphore path still needs to be debugged for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: The semaphore path still needs to be debugged for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

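/* Emit a register write that updates the IH VMID-to-PASID LUT entry for the ring's VM hub. */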
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

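/* Translate AMDGPU_VM_MTYPE_* flags into the NV10 PTE MTYPE encoding, defaulting to NC. */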
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	}
}

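/*
 * Convert a PDE's address from MC space to a physical address and apply the
 * level-specific flags (block fragment size at PDB1, translate-further at PDB0).
 */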
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

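/*
 * Build the PTE flags for a mapping: executable, MTYPE and NOALLOC bits come
 * from the mapping flags, PRT mappings are marked snooped/logged/system and
 * invalid, and coherent or uncached BOs are forced to MTYPE_UC.
 */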
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
}

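/*
 * Return the framebuffer size reserved by the VBIOS: either the VGA
 * allocation, or the active primary surface (viewport height * pitch * 4).
 */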
static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}


static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB; convert it to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1): /* DCE SG support */
		case IP_VERSION(10, 3, 3): /* DCE SG support */
		case IP_VERSION(10, 3, 6): /* DCE SG support */
		case IP_VERSION(10, 3, 7): /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To support 4-level page tables, set the VM size to 256TB
		 * (48 bits), the maximum for Navi10/Navi14/Navi12, with a
		 * block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

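/* No golden register programming is currently required for GMC v10. */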
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	amdgpu_device_flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	if (!adev->in_s0ix)
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
	if (!adev->in_s0ix)
		gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * The harvestable groups in gc_utcl2 need to be programmed before any
	 * GFX block register setup within GMC; otherwise the system hangs
	 * when an SA is harvested.
	 */
	if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	return 0;
}

static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v10_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v10_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * MMHUB failing to disconnect from DF when MMHUB clock gating is
	 * disabled is a new problem observed on DF 3.0.3; the same suspend
	 * sequence shows no issue on DF 3.0.2 series platforms.
	 */
	if (adev->in_s0ix &&
	    amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};