xref: /linux/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c (revision 297b0cebbcc3aad3fde692c988d5aa19f7b652b2)
1 /*
2  * Copyright 2025 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "soc15.h"
25 #include "soc15_common.h"
26 #include "soc_v1_0.h"
27 
28 #include "gc/gc_12_1_0_offset.h"
29 #include "gc/gc_12_1_0_sh_mask.h"
30 #include "mp/mp_15_0_8_offset.h"
31 
/* Initialize the doorbell index map used by amdgpu, including multimedia.
 * KFD can use all the remaining doorbells in the 2M doorbell BAR.
 */
static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_SOC_V1_0_DOORBELL_KIQ_START;

	/* Compute (MEC) ring and MES scheduler ring doorbells. */
	adev->doorbell_index.mec_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MEC_RING_START;
	adev->doorbell_index.mes_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING1;

	adev->doorbell_index.userqueue_start = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;

	/* 20 doorbells reserved for SDMA overall; each instance is spaced
	 * by half the range (range >> 1 == 10 doorbells per instance). */
	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_SOC_V1_0_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_SOC_V1_0_DOORBELL_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_SOC_V1_0_DOORBELL_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_SOC_V1_0_DOORBELL_LAST_NON_CP;

	/* NOTE(review): << 1 presumably converts the 32-bit doorbell index
	 * to a 64-bit doorbell slot count, matching other SOC files —
	 * confirm against amdgpu_doorbell handling. */
	adev->doorbell_index.max_assignment = AMDGPU_SOC_V1_0_DOORBELL_MAX_ASSIGNMENT << 1;
}
62 
63 static u32 soc_v1_0_get_config_memsize(struct amdgpu_device *adev)
64 {
65 	return adev->nbio.funcs->get_memsize(adev);
66 }
67 
68 static u32 soc_v1_0_get_xclk(struct amdgpu_device *adev)
69 {
70 	return adev->clock.spll.reference_freq;
71 }
72 
73 void soc_v1_0_grbm_select(struct amdgpu_device *adev,
74 			  u32 me, u32 pipe,
75 			  u32 queue, u32 vmid,
76 			  int xcc_id)
77 {
78 	u32 grbm_gfx_cntl = 0;
79 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
80 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
81 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
82 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
83 
84 	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, regGRBM_GFX_CNTL, grbm_gfx_cntl);
85 }
86 
/* Registers permitted through the read_register ASIC callback (see
 * soc_v1_0_read_register).  Entries with grbm_indexed set are read via
 * GRBM SE/SH selection in soc_v1_0_read_indexed_register. */
static struct soc15_allowed_register_entry soc_v1_0_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS3) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG_1) },
};
105 
106 static uint32_t soc_v1_0_read_indexed_register(struct amdgpu_device *adev,
107 					       u32 se_num,
108 					       u32 sh_num,
109 					       u32 reg_offset)
110 {
111 	uint32_t val;
112 
113 	mutex_lock(&adev->grbm_idx_mutex);
114 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
115 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
116 
117 	val = RREG32(reg_offset);
118 
119 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
120 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
121 	mutex_unlock(&adev->grbm_idx_mutex);
122 	return val;
123 }
124 
125 static uint32_t soc_v1_0_get_register_value(struct amdgpu_device *adev,
126 					    bool indexed, u32 se_num,
127 					    u32 sh_num, u32 reg_offset)
128 {
129 	if (indexed) {
130 		return soc_v1_0_read_indexed_register(adev, se_num, sh_num, reg_offset);
131 	} else {
132 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG_1) &&
133 		    adev->gfx.config.gb_addr_config)
134 			return adev->gfx.config.gb_addr_config;
135 		return RREG32(reg_offset);
136 	}
137 }
138 
139 static int soc_v1_0_read_register(struct amdgpu_device *adev,
140 				  u32 se_num, u32 sh_num,
141 				  u32 reg_offset, u32 *value)
142 {
143 	uint32_t i;
144 	struct soc15_allowed_register_entry  *en;
145 
146 	*value = 0;
147 	for (i = 0; i < ARRAY_SIZE(soc_v1_0_allowed_read_registers); i++) {
148 		en = &soc_v1_0_allowed_read_registers[i];
149 		if (!adev->reg_offset[en->hwip][en->inst])
150 			continue;
151 		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
152 					+ en->reg_offset))
153 			continue;
154 
155 		*value = soc_v1_0_get_register_value(adev,
156 				soc_v1_0_allowed_read_registers[i].grbm_indexed,
157 				se_num, sh_num, reg_offset);
158 		return 0;
159 	}
160 	return -EINVAL;
161 }
162 
163 static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
164 {
165 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
166 	case IP_VERSION(12, 1, 0):
167 	default:
168 		return true;
169 	}
170 }
171 
172 static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
173 {
174 	u32 sol_reg;
175 
176 	if (adev->flags & AMD_IS_APU)
177 		return false;
178 
179 	/* Check sOS sign of life register to confirm sys driver and sOS
180 	 * are already been loaded.
181 	 */
182 	sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
183 	if (sol_reg)
184 		return true;
185 
186 	return false;
187 }
188 
/* ASIC reset callback.  Currently a no-op that reports success; no reset
 * method is implemented for this SOC yet. */
static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
	return 0;
}
193 
194 static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
195 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
196 	.read_register = &soc_v1_0_read_register,
197 	.get_config_memsize = &soc_v1_0_get_config_memsize,
198 	.get_xclk = &soc_v1_0_get_xclk,
199 	.need_full_reset = &soc_v1_0_need_full_reset,
200 	.init_doorbell_index = &soc_v1_0_doorbell_index_init,
201 	.need_reset_on_init = &soc_v1_0_need_reset_on_init,
202 	.reset = soc_v1_0_asic_reset,
203 };
204 
205 static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
206 {
207 	struct amdgpu_device *adev = ip_block->adev;
208 
209 	adev->smc_rreg = NULL;
210 	adev->smc_wreg = NULL;
211 	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
212 	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
213 	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
214 	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
215 	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
216 	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
217 	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
218 	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
219 	adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
220 	adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
221 	adev->uvd_ctx_rreg = NULL;
222 	adev->uvd_ctx_wreg = NULL;
223 	adev->didt_rreg = NULL;
224 	adev->didt_wreg = NULL;
225 
226 	adev->asic_funcs = &soc_v1_0_asic_funcs;
227 
228 	adev->rev_id = amdgpu_device_get_rev_id(adev);
229 	adev->external_rev_id = 0xff;
230 
231 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
232 	case IP_VERSION(12, 1, 0):
233 		adev->cg_flags = 0;
234 		adev->pg_flags = 0;
235 		adev->external_rev_id = adev->rev_id + 0x50;
236 		break;
237 	default:
238 		/* FIXME: not supported yet */
239 		return -EINVAL;
240 	}
241 
242 	return 0;
243 }
244 
/* Late init for the common IP block. */
static int soc_v1_0_common_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Enable selfring doorbell aperture late because doorbell BAR
	 * aperture will change if resize BAR successfully in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}
256 
/* No SOC-level software state to set up; nothing to do here. */
static int soc_v1_0_common_sw_init(struct amdgpu_ip_block *ip_block)
{
	return 0;
}
261 
262 static int soc_v1_0_common_hw_init(struct amdgpu_ip_block *ip_block)
263 {
264 	struct amdgpu_device *adev = ip_block->adev;
265 
266 	/* enable the doorbell aperture */
267 	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
268 
269 	return 0;
270 }
271 
272 static int soc_v1_0_common_hw_fini(struct amdgpu_ip_block *ip_block)
273 {
274 	struct amdgpu_device *adev = ip_block->adev;
275 
276 	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
277 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
278 
279 	return 0;
280 }
281 
/* Suspend is identical to hw_fini: disable the doorbell apertures. */
static int soc_v1_0_common_suspend(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_fini(ip_block);
}
286 
/* Resume is identical to hw_init: re-enable the doorbell aperture. */
static int soc_v1_0_common_resume(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_init(ip_block);
}
291 
/* The common IP block tracks no busy state of its own; always idle. */
static bool soc_v1_0_common_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}
296 
/* No clockgating control is implemented for this SOC; accept any state. */
static int soc_v1_0_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_clockgating_state state)
{
	return 0;
}
302 
/* No powergating control is implemented for this SOC; accept any state. */
static int soc_v1_0_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_powergating_state state)
{
	return 0;
}
308 
309 static void soc_v1_0_common_get_clockgating_state(struct amdgpu_ip_block *ip_block,
310 						  u64 *flags)
311 {
312 	return;
313 }
314 
/* IP-block callback table for the SOC v1.0 "common" block. */
static const struct amd_ip_funcs soc_v1_0_common_ip_funcs = {
	.name = "soc_v1_0_common",
	.early_init = soc_v1_0_common_early_init,
	.late_init = soc_v1_0_common_late_init,
	.sw_init = soc_v1_0_common_sw_init,
	.hw_init = soc_v1_0_common_hw_init,
	.hw_fini = soc_v1_0_common_hw_fini,
	.suspend = soc_v1_0_common_suspend,
	.resume = soc_v1_0_common_resume,
	.is_idle = soc_v1_0_common_is_idle,
	.set_clockgating_state = soc_v1_0_common_set_clockgating_state,
	.set_powergating_state = soc_v1_0_common_set_powergating_state,
	.get_clockgating_state = soc_v1_0_common_get_clockgating_state,
};
329 
/* Registration record for the common IP block (version 1.0, rev 0). */
const struct amdgpu_ip_block_version soc_v1_0_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc_v1_0_common_ip_funcs,
};
337