/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "soc_v1_0.h"
#include "amdgpu_ip.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "mp/mp_15_0_8_offset.h"

/* Initialize doorbells for amdgpu, including multimedia.
 * KFD can use all the rest of the 2M doorbell BAR.
 */
static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_SOC_V1_0_DOORBELL_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MEC_RING_START;
	adev->doorbell_index.mes_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING1;

	adev->doorbell_index.userqueue_start = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_SOC_V1_0_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_SOC_V1_0_DOORBELL_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_SOC_V1_0_DOORBELL_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_SOC_V1_0_DOORBELL_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_SOC_V1_0_DOORBELL_MAX_ASSIGNMENT << 1;
}

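/* Report the configured memory size, as exposed by the NBIO block. */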
static u32 soc_v1_0_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

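/* xclk is the SPLL reference clock. */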
static u32 soc_v1_0_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

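/* Program GRBM_GFX_CNTL so that subsequent GRBM register accesses
 * target the given ME/pipe/queue/VMID.
 */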
void soc_v1_0_grbm_select(struct amdgpu_device *adev,
			  u32 me, u32 pipe,
			  u32 queue, u32 vmid,
			  int xcc_id)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

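/* Registers that may be read back through the read_register ASIC callback. */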
static struct soc15_allowed_register_entry soc_v1_0_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS3) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG_1) },
};

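/* Read a register under grbm_idx_mutex. A specific SE/SH is selected
 * first unless both indices are 0xffffffff (broadcast), and broadcast
 * selection is restored afterwards.
 */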
static uint32_t soc_v1_0_read_indexed_register(struct amdgpu_device *adev,
					       u32 se_num,
					       u32 sh_num,
					       u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

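/* Non-indexed reads of regGB_ADDR_CONFIG_1 are served from the cached
 * gb_addr_config value when it has been initialized; everything else
 * goes straight to the hardware.
 */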
static uint32_t soc_v1_0_get_register_value(struct amdgpu_device *adev,
					    bool indexed, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc_v1_0_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG_1) &&
		    adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

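/* Only registers in the allowed list may be read; any other offset
 * returns -EINVAL.
 */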
static int soc_v1_0_read_register(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num,
				  u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc_v1_0_allowed_read_registers); i++) {
		en = &soc_v1_0_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc_v1_0_get_register_value(adev,
				soc_v1_0_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
	default:
		return true;
	}
}

static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

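/* ASIC reset is currently a no-op on SOC v1.0. */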
static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc_v1_0_read_register,
	.get_config_memsize = &soc_v1_0_get_config_memsize,
	.get_xclk = &soc_v1_0_get_xclk,
	.need_full_reset = &soc_v1_0_need_full_reset,
	.init_doorbell_index = &soc_v1_0_doorbell_index_init,
	.need_reset_on_init = &soc_v1_0_need_reset_on_init,
	.reset = &soc_v1_0_asic_reset,
};

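/* Wire up the indirect register accessors and the ASIC callbacks, then
 * set the gating flags and external revision for the detected GC IP.
 */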
static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
	adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
	adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;
	adev->didt_rreg = NULL;
	adev->didt_wreg = NULL;

	adev->asic_funcs = &soc_v1_0_asic_funcs;

	adev->rev_id = amdgpu_device_get_rev_id(adev);
	adev->external_rev_id = 0xff;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int soc_v1_0_common_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Enable the selfring doorbell aperture late because the doorbell
	 * BAR aperture will change if BAR resizing succeeds in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_sw_init(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int soc_v1_0_common_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* enable the doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

	return 0;
}

static int soc_v1_0_common_suspend(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_fini(ip_block);
}

static int soc_v1_0_common_resume(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_init(ip_block);
}

static bool soc_v1_0_common_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

static int soc_v1_0_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_clockgating_state state)
{
	return 0;
}

static int soc_v1_0_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_powergating_state state)
{
	return 0;
}

static void soc_v1_0_common_get_clockgating_state(struct amdgpu_ip_block *ip_block,
						  u64 *flags)
{
}

static const struct amd_ip_funcs soc_v1_0_common_ip_funcs = {
	.name = "soc_v1_0_common",
	.early_init = soc_v1_0_common_early_init,
	.late_init = soc_v1_0_common_late_init,
	.sw_init = soc_v1_0_common_sw_init,
	.hw_init = soc_v1_0_common_hw_init,
	.hw_fini = soc_v1_0_common_hw_fini,
	.suspend = soc_v1_0_common_suspend,
	.resume = soc_v1_0_common_resume,
	.is_idle = soc_v1_0_common_is_idle,
	.set_clockgating_state = soc_v1_0_common_set_clockgating_state,
	.set_powergating_state = soc_v1_0_common_set_powergating_state,
	.get_clockgating_state = soc_v1_0_common_get_clockgating_state,
};

const struct amdgpu_ip_block_version soc_v1_0_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc_v1_0_common_ip_funcs,
};

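/* Early SoC configuration: two SDMA instances per XCC, then initialize
 * the IP instance map.
 */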
int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
{
	adev->sdma.num_inst_per_xcc = 2;

	amdgpu_ip_map_init(adev);

	return 0;
}