xref: /linux/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "nbif_v6_3_1.h"
25 
26 #include "nbif/nbif_6_3_1_offset.h"
27 #include "nbif/nbif_6_3_1_sh_mask.h"
28 #include "pcie/pcie_6_1_0_offset.h"
29 #include "pcie/pcie_6_1_0_sh_mask.h"
30 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
31 #include <uapi/linux/kfd_ioctl.h>
32 
/*
 * Local register definitions for the nbif 4.10 variant
 * (amdgpu_ip_version() == IP_VERSION(7, 11, 4)): on that variant the GDC
 * S2A doorbell entry registers and the RCC STRAP0 register live at
 * different offsets than the ones in the generated nbif_6_3_1 headers,
 * so they are defined here until proper headers are available.
 */
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL_nbif_4_10                                                           0x4f0aeb
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL1_nbif_4_10                                                          0x4f0aec
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL_nbif_4_10                                                           0x4f0aed
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL1_nbif_4_10                                                          0x4f0aee
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL_nbif_4_10                                                           0x4f0aef
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL1_nbif_4_10                                                          0x4f0af0
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL_nbif_4_10                                                           0x4f0af1
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL1_nbif_4_10                                                          0x4f0af2
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL_nbif_4_10                                                           0x4f0af3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL1_nbif_4_10                                                          0x4f0af4
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_nbif_4_10                                                           0x4f0af5
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_nbif_4_10_BASE_IDX                                                  3
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10                                                          0x4f0af6
#define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10_BASE_IDX                                                 3
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10                                                              0x0021
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10_BASE_IDX                                                     2
59 
/* Point the HDP mem/reg flush remap registers at the remapped MMIO window
 * (rmmio_remap.reg_offset) plus the fixed per-register offsets from the
 * KFD ioctl ABI, so the flush controls are reachable through the remap hole.
 */
static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
67 
68 static u32 nbif_v6_3_1_get_rev_id(struct amdgpu_device *adev)
69 {
70 	u32 tmp;
71 
72 	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4))
73 		tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10);
74 	else
75 		tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
76 
77 	tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
78 	tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
79 
80 	return tmp;
81 }
82 
83 static void nbif_v6_3_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
84 {
85 	if (enable)
86 		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
87 			     BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
88 			     BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
89 	else
90 		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
91 }
92 
/* Return the raw RCC_CONFIG_MEMSIZE register value (FB size as reported
 * by the RCC config space mirror). */
static u32 nbif_v6_3_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}
97 
/**
 * nbif_v6_3_1_sdma_doorbell_range - program the SDMA doorbell aperture
 * @adev: amdgpu device pointer
 * @instance: SDMA instance; only instance 0 is handled on this nbif
 * @use_doorbell: true to enable the range, false to disable it
 * @doorbell_index: first doorbell index of the range
 * @doorbell_size: size of the doorbell range
 *
 * SDMA doorbells are routed through GDC S2A doorbell entry 2 (port 2).
 * Disabling is done by programming a zero range size; the rest of the
 * entry is left as read.
 */
static void nbif_v6_3_1_sdma_doorbell_range(struct amdgpu_device *adev,
					    int instance, bool use_doorbell,
					    int doorbell_index,
					    int doorbell_size)
{
	if (instance == 0) {
		u32 doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL);

		if (use_doorbell) {
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_ENABLE,
						       0x1);
			/* NOTE(review): AWID 0xe and AWADDR value 0x3 are
			 * routing magics for this port -- confirm against
			 * the S2A spec if changing */
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_AWID,
						       0xe);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_OFFSET,
						       doorbell_index);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_SIZE,
						       doorbell_size);
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE,
						       0x3);
		} else
			/* zero range size disables the port */
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       GDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_SIZE,
						       0);

		/* nbif 4.10 has the same entry at a different offset */
		if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4)) {
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL_nbif_4_10, doorbell_range);
		} else {
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_2_CTRL, doorbell_range);
		}
	}
}
140 
/**
 * nbif_v6_3_1_vcn_doorbell_range - program the VCN doorbell aperture
 * @adev: amdgpu device pointer
 * @use_doorbell: true to enable the range, false to disable it
 * @doorbell_index: first doorbell index of the range
 * @instance: VCN instance; 0 uses S2A entry 4, non-zero uses entry 5
 *
 * Entry 4 and entry 5 share the same field layout, so the ENTRY_4 field
 * macros are applied to both registers.  AWID and AWADDR_31_28 differ per
 * instance (0x4 for instance 0, 0x7 otherwise); the range size is fixed
 * at 8 doorbells when enabled and 0 to disable.
 */
static void nbif_v6_3_1_vcn_doorbell_range(struct amdgpu_device *adev,
					   bool use_doorbell, int doorbell_index,
					   int instance)
{
	u32 doorbell_range;

	if (instance)
		doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL);
	else
		doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_ENABLE,
					       0x1);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_AWID,
					       instance ? 0x7 : 0x4);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_SIZE,
					       8);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE,
					       instance ? 0x7 : 0x4);
	} else
		/* zero range size disables the port */
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_SIZE,
					       0);

	/* nbif 4.10 has the same entries at different offsets */
	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4)) {
		if (instance)
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_nbif_4_10, doorbell_range);
		else
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL_nbif_4_10, doorbell_range);
	} else {
		if (instance)
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL, doorbell_range);
		else
			WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_4_CTRL, doorbell_range);
	}
}
191 
192 static void nbif_v6_3_1_gc_doorbell_init(struct amdgpu_device *adev)
193 {
194 	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4)) {
195 		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL_nbif_4_10, 0x30000007);
196 		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL_nbif_4_10, 0x3000000d);
197 	} else {
198 		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_0_CTRL, 0x30000007);
199 		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d);
200 	}
201 }
202 
/* Enable or disable the BIF doorbell aperture for dev0 EPF0. */
static void nbif_v6_3_1_enable_doorbell_aperture(struct amdgpu_device *adev,
						 bool enable)
{
	WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
			BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
209 
210 static void
211 nbif_v6_3_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
212 					      bool enable)
213 {
214 	u32 tmp = 0;
215 
216 	if (enable) {
217 		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
218 				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
219 		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
220 				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
221 		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
222 				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);
223 
224 		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
225 			     lower_32_bits(adev->doorbell.base));
226 		WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
227 			     upper_32_bits(adev->doorbell.base));
228 	}
229 
230 	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
231 }
232 
/**
 * nbif_v6_3_1_ih_doorbell_range - program the IH doorbell aperture
 * @adev: amdgpu device pointer
 * @use_doorbell: true to enable the range, false to disable it
 * @doorbell_index: first doorbell index of the range
 *
 * IH doorbells are routed through GDC S2A doorbell entry 1 (port 1) with
 * a fixed range size of 2; a zero range size disables the port.
 */
static void nbif_v6_3_1_ih_doorbell_range(struct amdgpu_device *adev,
					  bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_ENABLE,
						  0x1);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_AWID,
						  0x0);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_OFFSET,
						  doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_SIZE,
						  2);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
						  0x0);
	} else
		/* zero range size disables the port */
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  GDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_SIZE,
						  0);

	/* nbif 4.10 has the same entry at a different offset */
	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4)) {
		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL_nbif_4_10, ih_doorbell_range);
	} else {
		WREG32_SOC15(NBIO, 0, regGDC_S2A0_S2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range);
	}
}
271 
/* Program BIF interrupt delivery: dummy-read page address and the
 * dummy-read / snoop behaviour used by the IH ring. */
static void nbif_v6_3_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control: INTERRUPT_CNTL2 takes the dummy page
	 * address right-shifted by 8 */
	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
	/*
	 * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}
293 
/* Intentionally a no-op: no medium grain clock gating programming is done
 * for this nbif version; kept so the common nbio callback table is complete. */
static void
nbif_v6_3_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
}
299 
/* Intentionally a no-op: no medium grain light sleep programming is done
 * for this nbif version; kept so the common nbio callback table is complete. */
static void
nbif_v6_3_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					    bool enable)
{
}
305 
/* Intentionally a no-op: this nbif version reports no clockgating flags,
 * so @flags is left untouched. */
static void
nbif_v6_3_1_get_clockgating_state(struct amdgpu_device *adev,
				  u64 *flags)
{
}
311 
/* Return the register offset engines write to request an HDP flush. */
static u32 nbif_v6_3_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
316 
/* Return the register offset engines poll for HDP flush completion. */
static u32 nbif_v6_3_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
321 
322 static u32 nbif_v6_3_1_get_pcie_index_offset(struct amdgpu_device *adev)
323 {
324 	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4)) {
325 		return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX);
326 	}
327 	else {
328 		return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
329 	}
330 }
331 
/* Return the offset of the indirect PCIE data register, paired with the
 * index register from nbif_v6_3_1_get_pcie_index_offset(). */
static u32 nbif_v6_3_1_get_pcie_data_offset(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4))
		return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA);
	else
		return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}
339 
/* Per-engine reference/mask pairs for polling GPU_HDP_FLUSH_DONE after an
 * HDP flush request (CP queues 0-9 and both SDMA instances). */
const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
354 
/* One-time register init: clear the STRAP_NO_SOFT_RESET strap for dev0
 * EPF2 so that function 2 is not excluded from soft reset. */
static void nbif_v6_3_1_init_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
	data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
	WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
363 
364 static u32 nbif_v6_3_1_get_rom_offset(struct amdgpu_device *adev)
365 {
366 	u32 data, rom_offset;
367 
368 	data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL);
369 	rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET);
370 
371 	return rom_offset;
372 }
373 
374 #ifdef CONFIG_PCIEASPM
/* Program PCIe LTR (Latency Tolerance Reporting) on the endpoint side and
 * sync the DEVCTL2 LTR enable bit with the platform's ltr_path state.
 * Only built under CONFIG_PCIEASPM (see surrounding #ifdef). */
static void nbif_v6_3_1_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;
	u16 devctl2;

	def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	/* NOTE(review): 0x35EB is a magic baseline value whose origin is not
	 * visible here -- confirm against the TX_LTR_CNTL register spec */
	data = 0x35EB;
	data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	data &= ~RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	/* allow LTR messages while in ASPM L1 */
	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data);

	pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2);

	/* NOTE(review): compares a bool ltr_path against the raw masked bit
	 * (0x400), so this only short-circuits when both are 0; redundant
	 * set/clear below is harmless -- verify intent before changing */
	if (adev->pdev->ltr_path == (devctl2 & PCI_EXP_DEVCTL2_LTR_EN))
		return;

	if (adev->pdev->ltr_path)
		pcie_capability_set_word(adev->pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);
	else
		pcie_capability_clear_word(adev->pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);
}
402 #endif
403 
/*
 * nbif_v6_3_1_program_aspm - program ASPM (Active State Power Management)
 * @adev: amdgpu device pointer
 *
 * Disables the L0s/L1 inactivity timers and vlink timers, temporarily
 * clears DEVCTL2 LTR enable, programs the LTR latency capability and LC
 * power-down/standby controls, runs LTR programming, then restores the
 * timers and re-enables L1 inactivity-based entry.  The body is compiled
 * out entirely when CONFIG_PCIEASPM is disabled.
 *
 * Fix vs. previous revision: disabling LTR used
 * pcie_capability_set_word() with @data whose LTR_EN bit was already
 * cleared; set_word only ORs bits into the register and therefore can
 * never clear LTR_EN.  Use pcie_capability_clear_word() so the bit is
 * actually cleared.
 */
static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	uint32_t def, data;
	u16 devctl2, ltr;

	/* disable inactivity-based L0s/L1 entry and PMI-to-L1 while
	 * reprogramming */
	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);

	/* let nbif drive the ASPM input of the link controller */
	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL7, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3, data);

	/* clear the vlink ASPM idle / PM L1 entry timers; restored with
	 * real values further below */
	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
	data &= ~RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

	/* temporarily disable LTR; pcie_capability_set_word() cannot clear
	 * a bit, so use the clear accessor (only touch the register when
	 * the bit is actually set) */
	pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2);
	if (devctl2 & PCI_EXP_DEVCTL2_LTR_EN)
		pcie_capability_clear_word(adev->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_LTR_EN);

	ltr = pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_LTR);

	if (ltr) {
		/* program max snoop and no-snoop latency in one dword write */
		pci_write_config_dword(adev->pdev, ltr + PCI_LTR_MAX_SNOOP_LAT, 0x10011001);
	}

#if 0
	/* regPSWUSP0_PCIE_LC_CNTL2 should be replace by PCIE_LC_CNTL2 or someone else ? */
	def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data);
#endif
	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4);
	data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL4, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL);
	data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data);

	nbif_v6_3_1_program_ltr(adev);

	/* restore the vlink timers with their operational values */
	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
	data |= 0x0010 << RCC_STRAP0_RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);

	/* re-enable inactivity-based L1 entry (L0s stays off) */
	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL);
	data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL, data);

	def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3);
	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL3, data);
#endif
}
495 
/* Last page of the first 512KB of MMIO space, reserved as the remap hole */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)

/* Choose where HDP flush registers get remapped: on bare metal with page
 * size <= 4K use the MMIO hole, otherwise (SR-IOV or larger pages) fall
 * back to the byte offset of the HDP flush register itself (<< 2 converts
 * the dword register offset to bytes) with no bus address. */
static void nbif_v6_3_1_set_reg_remap(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	} else {
		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
		adev->rmmio_remap.bus_addr = 0;
	}
}
509 
/* nbif v6.3.1 callback table plugged into the common amdgpu nbio layer */
const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs = {
	.get_hdp_flush_req_offset = nbif_v6_3_1_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbif_v6_3_1_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbif_v6_3_1_get_pcie_index_offset,
	.get_pcie_data_offset = nbif_v6_3_1_get_pcie_data_offset,
	.get_rev_id = nbif_v6_3_1_get_rev_id,
	.mc_access_enable = nbif_v6_3_1_mc_access_enable,
	.get_memsize = nbif_v6_3_1_get_memsize,
	.sdma_doorbell_range = nbif_v6_3_1_sdma_doorbell_range,
	.vcn_doorbell_range = nbif_v6_3_1_vcn_doorbell_range,
	.gc_doorbell_init = nbif_v6_3_1_gc_doorbell_init,
	.enable_doorbell_aperture = nbif_v6_3_1_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbif_v6_3_1_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbif_v6_3_1_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbif_v6_3_1_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbif_v6_3_1_update_medium_grain_light_sleep,
	.get_clockgating_state = nbif_v6_3_1_get_clockgating_state,
	.ih_control = nbif_v6_3_1_ih_control,
	.init_registers = nbif_v6_3_1_init_registers,
	.remap_hdp_registers = nbif_v6_3_1_remap_hdp_registers,
	.get_rom_offset = nbif_v6_3_1_get_rom_offset,
	.program_aspm = nbif_v6_3_1_program_aspm,
	.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};
534 
535 
/* Enable/disable delivery of the RAS ATHUB err-event interrupt by toggling
 * its disable bit in BIF_DOORBELL_INT_CNTL.  Always returns 0. */
static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable ras feature. Driver only need to set the correct interrupt
	 * vector for bare-metal and sriov use case respectively
	 */
	uint32_t bif_doorbell_int_cntl;

	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
	/* DISABLE bit is inverted relative to the requested state */
	bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
					      BIF_BX0_BIF_DOORBELL_INT_CNTL,
					      RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
					      (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);

	return 0;
}
556 
/* Dummy .process callback for the RAS ATHUB err-event irq; always 0. */
static int nbif_v6_3_1_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to bif ring. since bif ring is not enabled, just leave process callback
	 * as a dummy one.
	 */
	return 0;
}
567 
/* irq source callbacks for the RAS ATHUB err-event interrupt */
static const struct amdgpu_irq_src_funcs nbif_v6_3_1_ras_err_event_athub_irq_funcs = {
	.set = nbif_v6_3_1_set_ras_err_event_athub_irq_state,
	.process = nbif_v6_3_1_process_err_event_athub_irq,
};
572 
/* Poll-and-clear handler for the RAS ATHUB err-event interrupt when the
 * bif ring is disabled: if the status bit is set, clear it and forward to
 * the global RAS isr. */
static void nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_int_cntl;

	bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_int_cntl,
			  BIF_BX0_BIF_DOORBELL_INT_CNTL,
			  RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
						BIF_BX0_BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
		amdgpu_ras_global_ras_isr(adev);
	}
}
589 
590 static int nbif_v6_3_1_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
591 {
592 	int r;
593 
594 	/* init the irq funcs */
595 	adev->nbio.ras_err_event_athub_irq.funcs =
596 		&nbif_v6_3_1_ras_err_event_athub_irq_funcs;
597 	adev->nbio.ras_err_event_athub_irq.num_types = 1;
598 
599 	/* register ras err event athub interrupt
600 	 * nbif v6_3_1 uses the same irq source as nbio v7_4
601 	 */
602 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
603 			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
604 			      &adev->nbio.ras_err_event_athub_irq);
605 
606 	return r;
607 }
608 
/* RAS callbacks exported for the common amdgpu nbio RAS layer */
struct amdgpu_nbio_ras nbif_v6_3_1_ras = {
	.handle_ras_err_event_athub_intr_no_bifring =
		nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_err_event_athub_interrupt =
		nbif_v6_3_1_init_ras_err_event_athub_interrupt,
};
615