xref: /linux/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c (revision 55d0969c451159cff86949b38c39171cab962069)
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"

#include "oss/osssys_6_0_0_offset.h"
#include "oss/osssys_6_0_0_sh_mask.h"

#include "soc15_common.h"
#include "ih_v6_0.h"

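/* Upper bound on how many times ih_v6_0_irq_rearm() re-rings the doorbell
 * before giving up.
 */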
#define MAX_REARM_RETRY 10

static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * ih_v6_0_init_register_offset - Initialize register offsets for ih rings
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the register offsets used by the ih rings (IH_V6_0).
 */
static void ih_v6_0_init_register_offset(struct amdgpu_device *adev)
{
	struct amdgpu_ih_regs *ih_regs;

	/* ih ring 2 is removed
	 * ih ring and ih ring 1 are available */
	if (adev->irq.ih.ring_size) {
		ih_regs = &adev->irq.ih.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
		ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
		ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
	}

	if (adev->irq.ih1.ring_size) {
		ih_regs = &adev->irq.ih1.ih_regs;
		ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
		ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
		ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
		ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
		ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
		ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
		ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
	}
}

/**
 * force_update_wptr_for_self_int - Force update the wptr for self interrupt
 *
 * @adev: amdgpu_device pointer
 * @threshold: threshold to trigger the wptr reporting
 * @timeout: timeout to trigger the wptr reporting
 * @enabled: Enable/disable timeout flush mechanism
 *
 * threshold input range: 0 ~ 15, default 0,
 * real_threshold = 2^threshold
 * timeout input range: 0 ~ 20, default 8,
 * real_timeout = (2^timeout) * 1024 / (socclk_freq)
 *
 * Force update wptr for self interrupt (>= SIENNA_CICHLID).
 */
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
			       u32 threshold, u32 timeout, bool enabled)
{
	u32 ih_cntl, ih_rb_cntl;

	ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);

	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
				SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

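	/* Under SR-IOV with indirect IH register access, IH_RB_CNTL_RING1
	 * must be programmed through the PSP rather than by a direct MMIO
	 * write.
	 */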
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
	}

	WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}

/**
 * ih_v6_0_toggle_ring_interrupts - toggle the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 * @enable: true - enable the interrupts, false - disable the interrupts
 *
 * Toggle the interrupt ring buffer (IH_V6_0)
 */
static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
					  struct amdgpu_ih_ring *ih,
					  bool enable)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));

	if (enable) {
		/* Unset the CLEAR_OVERFLOW bit to make sure the next step
		 * is switching the bit from 0 to 1
		 */
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
				return -ETIMEDOUT;
		} else {
			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
		}

		/* Clear RB_OVERFLOW bit */
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
				return -ETIMEDOUT;
		} else {
			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
		}

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
	}

	/* enable_intr field is only valid in ring0 */
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
			return -ETIMEDOUT;
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (enable) {
		ih->enabled = true;
	} else {
		/* set rptr, wptr to 0 */
		WREG32(ih_regs->ih_rb_rptr, 0);
		WREG32(ih_regs->ih_rb_wptr, 0);
		ih->enabled = false;
		ih->rptr = 0;
	}

	return 0;
}

/**
 * ih_v6_0_toggle_interrupts - Toggle all the available interrupt ring buffers
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable interrupt ring buffers
 *
 * Toggle all the available interrupt ring buffers (IH_V6_0).
 */
static int ih_v6_0_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
	int i;
	int r;

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			r = ih_v6_0_toggle_ring_interrupts(adev, ih[i], enable);
			if (r)
				return r;
		}
	}

	return 0;
}

static uint32_t ih_v6_0_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
	int rb_bufsz = order_base_2(ih->ring_size / 4);

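	/* rb_bufsz above is log2 of the ring size in dwords (ring_size is
	 * in bytes).  MC_SPACE selects the address space used for the ring
	 * base: this code pairs a bus/physical address with the encoding 2
	 * and a GPU virtual address with 4 (encodings taken as-is from the
	 * values below, not from a register spec).
	 */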
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 2 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

static uint32_t ih_v6_0_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

/**
 * ih_v6_0_enable_ring - enable an ih ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Enable an ih ring buffer (IH_V6_0)
 */
static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
				      struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;
	uint32_t tmp;

	ih_regs = &ih->ih_regs;

	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
	WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);

	tmp = RREG32(ih_regs->ih_rb_cntl);
	tmp = ih_v6_0_rb_cntl(ih, tmp);
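	/* Only let rptr writes rearm the interrupt when MSI is enabled */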
	if (ih == &adev->irq.ih)
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
	if (ih == &adev->irq.ih1) {
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

	if (ih == &adev->irq.ih) {
		/* set the ih ring 0 writeback address whether it's enabled or not */
		WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
		WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
	}

	/* set rptr, wptr to 0 */
	WREG32(ih_regs->ih_rb_wptr, 0);
	WREG32(ih_regs->ih_rb_rptr, 0);

	WREG32(ih_regs->ih_doorbell_rptr, ih_v6_0_doorbell_rptr(ih));

	return 0;
}

/**
 * ih_v6_0_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller,
 * enable the RLC, disable interrupts, set up the IH
 * ring buffer and then enable it.
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int ih_v6_0_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
	u32 ih_chicken;
	u32 tmp;
	int ret;
	int i;

	/* disable irqs */
	ret = ih_v6_0_toggle_interrupts(adev, false);
	if (ret)
		return ret;

	adev->nbio.funcs->ih_control(adev);

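	/* When firmware is loaded by the driver rather than the PSP,
	 * IH_CHICKEN must be programmed here so a bus (guest physical)
	 * address may be used for the ring; otherwise the PSP bootloader
	 * handles it (see the use_bus_addr choice in ih_v6_0_sw_init()).
	 */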
	if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
		     (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
		if (ih[0]->use_bus_addr) {
			ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
			ih_chicken = REG_SET_FIELD(ih_chicken,
					IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
			WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
		}
	}

	for (i = 0; i < ARRAY_SIZE(ih); i++) {
		if (ih[i]->ring_size) {
			ret = ih_v6_0_enable_ring(adev, ih[i]);
			if (ret)
				return ret;
		}
	}

	/* update doorbell range for ih ring 0 */
	adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
					    ih[0]->doorbell_index);

	tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);

	/* GC/MMHUB UTCL2 page fault interrupts are configured as
	 * MSI storm capable interrupts by default. The delay is
	 * used to avoid the ISR being called too frequently
	 * when page faults happen on several contiguous pages,
	 * and thus avoids an MSI storm */
	tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
	tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
			    DELAY, 3);
	WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);

	/* Redirect the interrupts to IH RB1 for dGPU */
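	/* IH_RING1_CLIENT_CFG_INDEX selects which ring 1 client-config
	 * slot is written; the DATA value then programs a client-id /
	 * source-id match (client 0xa, source 0x0) so that matching
	 * interrupts are steered to ring 1.
	 */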
	if (adev->irq.ih1.ring_size) {
		tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
		WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);

		tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
		tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
				    SOURCE_ID_MATCH_ENABLE, 0x1);

		WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
	}

	pci_set_master(adev->pdev);

	/* enable interrupts */
	ret = ih_v6_0_toggle_interrupts(adev, true);
	if (ret)
		return ret;
	/* enable wptr force update for self int */
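	/* threshold 0, timeout 8: per the formula documented above
	 * force_update_wptr_for_self_int(), the wptr is force-reported
	 * once 2^0 = 1 entry is pending, or after (2^8 * 1024) socclk
	 * cycles at the latest.
	 */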
	force_update_wptr_for_self_int(adev, 0, 8, true);

	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

	return 0;
}

/**
 * ih_v6_0_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw.
 */
static void ih_v6_0_irq_disable(struct amdgpu_device *adev)
{
	force_update_wptr_for_self_int(adev, 0, 8, false);
	ih_v6_0_toggle_interrupts(adev, false);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * ih_v6_0_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer.  Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	wptr = le32_to_cpu(*ih->wptr_cpu);
	ih_regs = &ih->ih_regs;

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

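	/* The writeback copy can be stale; double check the overflow bit
	 * against the live register before treating this as a real
	 * overflow.
	 */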
	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;
	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not-overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);

	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
	 * can be detected.
	 */
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
	return (wptr & ih->ptr_mask);
}

/**
 * ih_v6_0_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 */
static void ih_v6_0_irq_rearm(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	uint32_t v = 0;
	uint32_t i = 0;
	struct amdgpu_ih_regs *ih_regs;

	ih_regs = &ih->ih_regs;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
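	/* Read back the hardware rptr: while it is a sane ring offset and
	 * still differs from the software rptr, the doorbell write may
	 * have been dropped, so ring it again (at most MAX_REARM_RETRY
	 * times).
	 */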
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

/**
 * ih_v6_0_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: amdgpu_ih_ring pointer
 *
 * Set the IH ring buffer rptr.
 */
static void ih_v6_0_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		if (amdgpu_sriov_vf(adev))
			ih_v6_0_irq_rearm(adev, ih);
	} else {
		ih_regs = &ih->ih_regs;
		WREG32(ih_regs->ih_rb_rptr, ih->rptr);
	}
}

/**
 * ih_v6_0_self_irq - dispatch work for ring 1
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int ih_v6_0_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	default:
		break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs ih_v6_0_self_irq_funcs = {
	.process = ih_v6_0_self_irq,
};

static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
}

static int ih_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_set_interrupt_funcs(adev);
	ih_v6_0_set_self_irq_funcs(adev);
	return 0;
}

static int ih_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool use_bus_addr;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);

	if (r)
		return r;

	/* use gpu virtual address for ih ring
	 * until ih_chicken is programmed to allow
	 * the use of a bus address for the ih ring by the psp bl */
	use_bus_addr =
		(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
	if (r)
		return r;

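	/* The doorbell_index values appear to count 64-bit doorbell slots,
	 * hence the shift left by one to form the 32-bit dword index the
	 * IH uses (an assumption inferred from the << 1 below).
	 */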
	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE,
					use_bus_addr);
		if (r)
			return r;

		adev->irq.ih1.use_doorbell = true;
		adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
	}

	/* initialize ih control register offsets */
	ih_v6_0_init_register_offset(adev);

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
	if (r)
		return r;

	r = amdgpu_irq_init(adev);

	return r;
}

static int ih_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int ih_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = ih_v6_0_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int ih_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_irq_disable(adev);

	return 0;
}

static int ih_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return ih_v6_0_hw_fini(adev);
}

static int ih_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return ih_v6_0_hw_init(adev);
}

static bool ih_v6_0_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int ih_v6_0_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int ih_v6_0_soft_reset(void *handle)
{
	/* todo */
	return 0;
}

static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
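		/* The *_SOFT_OVERRIDE bits force their clocks on; clearing
		 * them lets the hardware gate the clocks, hence the
		 * inversion below.
		 */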
		field_val = enable ? 0 : 1;
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
	}
}

static int ih_v6_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ih_v6_0_update_clockgating_state(adev,
				state == AMD_CG_STATE_GATE);
	return 0;
}

static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t ih_mem_pwr_cntl;

	/* Disable ih sram power cntl before switching the powergating mode */
	ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
	ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
					IH_BUFFER_MEM_POWER_CTRL_EN, 0);
	WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);

	/* It is recommended to set mem powergating mode to DS mode */
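	/* LS/DS/SD are assumed to stand for light sleep, deep sleep and
	 * shutdown; only the deep-sleep enables are set on the enable
	 * path below.
	 */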
	if (enable) {
		/* mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_DS_EN, 1);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_SD_EN, 0);
		/* cam mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
		/* re-enable power cntl */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_CTRL_EN, 1);
	} else {
		/* mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_DS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_SD_EN, 0);
		/* cam mem power mode */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
		/* re-enable power cntl */
		ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
						IH_BUFFER_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}

static int ih_v6_0_set_powergating_state(void *handle,
					 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
		ih_v6_0_update_ih_mem_power_gating(adev, enable);

	return 0;
}

static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
		*flags |= AMD_CG_SUPPORT_IH_CG;
}

static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
	.name = "ih_v6_0",
	.early_init = ih_v6_0_early_init,
	.late_init = NULL,
	.sw_init = ih_v6_0_sw_init,
	.sw_fini = ih_v6_0_sw_fini,
	.hw_init = ih_v6_0_hw_init,
	.hw_fini = ih_v6_0_hw_fini,
	.suspend = ih_v6_0_suspend,
	.resume = ih_v6_0_resume,
	.is_idle = ih_v6_0_is_idle,
	.wait_for_idle = ih_v6_0_wait_for_idle,
	.soft_reset = ih_v6_0_soft_reset,
	.set_clockgating_state = ih_v6_0_set_clockgating_state,
	.set_powergating_state = ih_v6_0_set_powergating_state,
	.get_clockgating_state = ih_v6_0_get_clockgating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
	.get_wptr = ih_v6_0_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
	.set_rptr = ih_v6_0_set_rptr
};

static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &ih_v6_0_funcs;
}

const struct amdgpu_ip_block_version ih_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &ih_v6_0_ip_funcs,
};