xref: /linux/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c (revision 75372d75a4e23783583998ed99d5009d555850da)
/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "soc_v1_0.h"
#include "amdgpu_ip.h"
#include "amdgpu_imu.h"
#include "gfxhub_v12_1.h"
#include "sdma_v7_1.h"
#include "gfx_v12_1.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "mp/mp_15_0_8_offset.h"

#define XCC_REG_RANGE_0_LOW  0x1260     /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3C00     /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper bound */
#define NORMALIZE_XCC_REG_OFFSET(offset) \
	((offset) & 0xFFFF)

/* Initialize the doorbell indexes used by amdgpu, including multimedia.
 * KFD can use all the rest of the 2M doorbell BAR.
 */
static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_SOC_V1_0_DOORBELL_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MEC_RING_START;
	adev->doorbell_index.mes_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING1;

	adev->doorbell_index.userqueue_start = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
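	/* Engines are spaced half of sdma_doorbell_range apart, i.e. 10 doorbell indexes per SDMA instance */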
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_SOC_V1_0_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_SOC_V1_0_DOORBELL_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_SOC_V1_0_DOORBELL_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_SOC_V1_0_DOORBELL_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_SOC_V1_0_DOORBELL_MAX_ASSIGNMENT << 1;
}

/* Fixed pattern for the upper 32 bits of SMN addressing.
 *   bit[47:40]: Socket ID
 *   bit[39:34]: Die ID
 *   bit[32]: local or remote die in same socket
 * The ext_id is composed of socket_id and die_id:
 *   ext_id = (socket_id << 6) | (die_id)
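 *
 * For example, ext_id 0x42 (socket_id 1, die_id 2) encodes to
 * (1ULL << 40) | (2ULL << 34) | (3ULL << 32) = 0x10b00000000.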
 */
u64 soc_v1_0_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;
	int socket_id, die_id;

	/* local die routing for MID0 on local socket */
	if (ext_id == 0)
		return 0;

	die_id = ext_id & 0x3;
	socket_id = (ext_id >> 6) & 0xff;

	/* Initiated from host, access to non-MID0 is cross-die traffic */
	if (socket_id == 0)
		ext_offset = ((u64)die_id << 34) | (1ULL << 32);
	else if (socket_id != 0 && die_id != 0)
		ext_offset = ((u64)socket_id << 40) | ((u64)die_id << 34) |
				(3ULL << 32);
	else
		ext_offset = ((u64)socket_id << 40) | (1ULL << 33);

	return ext_offset;
}

static u32 soc_v1_0_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc_v1_0_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

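/* Program GRBM_GFX_CNTL so that subsequent indexed register accesses on the
 * given XCC are steered to the selected ME/pipe/queue/VMID.
 */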
void soc_v1_0_grbm_select(struct amdgpu_device *adev,
			  u32 me, u32 pipe,
			  u32 queue, u32 vmid,
			  int xcc_id)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

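/* Registers that may be read back through soc_v1_0_read_register() */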
static struct soc15_allowed_register_entry soc_v1_0_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS3) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG_1) },
};

static uint32_t soc_v1_0_read_indexed_register(struct amdgpu_device *adev,
					       u32 se_num,
					       u32 sh_num,
					       u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc_v1_0_get_register_value(struct amdgpu_device *adev,
					    bool indexed, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc_v1_0_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG_1) &&
		    adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc_v1_0_read_register(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num,
				  u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc_v1_0_allowed_read_registers); i++) {
		en = &soc_v1_0_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc_v1_0_get_register_value(adev,
				soc_v1_0_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
	default:
		return true;
	}
}

static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc_v1_0_read_register,
	.get_config_memsize = &soc_v1_0_get_config_memsize,
	.get_xclk = &soc_v1_0_get_xclk,
	.need_full_reset = &soc_v1_0_need_full_reset,
	.init_doorbell_index = &soc_v1_0_doorbell_index_init,
	.need_reset_on_init = &soc_v1_0_need_reset_on_init,
	.encode_ext_smn_addressing = &soc_v1_0_encode_ext_smn_addressing,
	.reset = soc_v1_0_asic_reset,
};

static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
	adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
	adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;
	adev->didt_rreg = NULL;
	adev->didt_wreg = NULL;

	adev->asic_funcs = &soc_v1_0_asic_funcs;

	adev->rev_id = amdgpu_device_get_rev_id(adev);
	adev->external_rev_id = 0xff;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int soc_v1_0_common_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Enable the selfring doorbell aperture late because the doorbell BAR
	 * aperture will change if the BAR is resized successfully in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_sw_init(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int soc_v1_0_common_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* enable the doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

	return 0;
}

static int soc_v1_0_common_suspend(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_fini(ip_block);
}

static int soc_v1_0_common_resume(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_init(ip_block);
}

static bool soc_v1_0_common_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

static int soc_v1_0_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_clockgating_state state)
{
	return 0;
}

static int soc_v1_0_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_powergating_state state)
{
	return 0;
}

static void soc_v1_0_common_get_clockgating_state(struct amdgpu_ip_block *ip_block,
						  u64 *flags)
{
	return;
}

static const struct amd_ip_funcs soc_v1_0_common_ip_funcs = {
	.name = "soc_v1_0_common",
	.early_init = soc_v1_0_common_early_init,
	.late_init = soc_v1_0_common_late_init,
	.sw_init = soc_v1_0_common_sw_init,
	.hw_init = soc_v1_0_common_hw_init,
	.hw_fini = soc_v1_0_common_hw_fini,
	.suspend = soc_v1_0_common_suspend,
	.resume = soc_v1_0_common_resume,
	.is_idle = soc_v1_0_common_is_idle,
	.set_clockgating_state = soc_v1_0_common_set_clockgating_state,
	.set_powergating_state = soc_v1_0_common_set_powergating_state,
	.get_clockgating_state = soc_v1_0_common_get_clockgating_state,
};

const struct amdgpu_ip_block_version soc_v1_0_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc_v1_0_common_ip_funcs,
};

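/* Derive the compute partition mode from the XCC count and the XCCs-per-XCP
 * value reported by the GFX IP.
 */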
static enum amdgpu_gfx_partition __soc_v1_0_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs &&
	    adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if (num_xcc_per_xcp && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

static int soc_v1_0_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode, mode;
	struct amdgpu_device *adev = xcp_mgr->adev;

	mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	derv_mode = __soc_v1_0_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev) || !adev->psp.funcs)
		return derv_mode;

	if (adev->nbio.funcs &&
	    adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(adev->dev,
				 "Mismatch in compute partition mode - reported : %d derived : %d",
				 mode, derv_mode);
	}

	return mode;
}

static int __soc_v1_0_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

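/* Fill in the instance mask and IP callbacks for one IP block inside the
 * partition identified by xcp_id.
 */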
static int __soc_v1_0_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				      enum AMDGPU_XCP_IP_BLOCK ip_id,
				      struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v12_1_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v12_1_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v7_1_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

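/* Report the per-partition resource counts (XCC, SDMA, VCN, JPEG) and the
 * compatible NPS modes for the requested compute partition mode.
 */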
static int soc_v1_0_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				     int mode,
				     struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcp = 1;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcp = 2;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcp = 3;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcp = 4;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

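/* Pick a default compute partition mode based on how many memory partitions
 * the device exposes.
 */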
static enum amdgpu_gfx_partition __soc_v1_0_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2)
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __soc_v1_0_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
				     enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		/* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
		 * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
		 * num_compute_partitions can't be less than num_mem_partitions
		 */
		return ((num_xcc > 1) &&
		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}
}

static void __soc_v1_0_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__soc_v1_0_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

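/* Switch the compute partition mode: validate the request (or derive one in
 * auto mode), pause KFD, let the IMU reprogram the partition, then rebuild
 * the XCP layout and the set of available modes.
 */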
static int soc_v1_0_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __soc_v1_0_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__soc_v1_0_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __soc_v1_0_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.imu.funcs &&
	    adev->gfx.imu.funcs->switch_compute_partition) {
		ret = adev->gfx.imu.funcs->switch_compute_partition(xcp_mgr->adev,
								    num_xcc_per_xcp,
								    mode);
		if (ret)
			goto unlock;
	}
	if (adev->gfx.imu.funcs &&
	    adev->gfx.imu.funcs->init_mcm_addr_lut &&
	    amdgpu_emu_mode)
		adev->gfx.imu.funcs->init_mcm_addr_lut(adev);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__soc_v1_0_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

#ifdef HAVE_ACPI_DEV_GET_FIRST_MATCH_DEV
static int __soc_v1_0_get_xcp_mem_id(struct amdgpu_device *adev,
				     int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int soc_v1_0_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
				   struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __soc_v1_0_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}
#endif

static int soc_v1_0_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				       enum AMDGPU_XCP_IP_BLOCK ip_id,
				       struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __soc_v1_0_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs soc_v1_0_xcp_funcs = {
	.switch_partition_mode = &soc_v1_0_switch_partition_mode,
	.query_partition_mode = &soc_v1_0_query_partition_mode,
	.get_ip_details = &soc_v1_0_get_xcp_ip_details,
	.get_xcp_res_info = &soc_v1_0_get_xcp_res_info,
#ifdef HAVE_ACPI_DEV_GET_FIRST_MATCH_DEV
	.get_xcp_mem_id = &soc_v1_0_get_xcp_mem_id,
#endif
};

static int soc_v1_0_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		soc_v1_0_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE,
				  1, &soc_v1_0_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

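/* Derive the AID mask from the XCC mask (one AID per group of four XCCs, e.g.
 * xcc_mask 0x00f0 yields aid_mask 0x2), size the SDMA instance set, then bring
 * up the XCP manager.
 */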
int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
{
	int ret, i;
	int xcc_inst_per_aid = 4;
	uint16_t xcc_mask;

	xcc_mask = adev->gfx.xcc_mask;
	adev->aid_mask = 0;
	for (i = 0; xcc_mask; xcc_mask >>= xcc_inst_per_aid, i++) {
		if (xcc_mask & ((1U << xcc_inst_per_aid) - 1))
			adev->aid_mask |= (1 << i);
	}

	adev->sdma.num_inst_per_xcc = 2;
	adev->sdma.num_instances =
		NUM_XCC(adev->gfx.xcc_mask) * adev->sdma.num_inst_per_xcc;
	adev->sdma.sdma_mask =
		GENMASK(adev->sdma.num_instances - 1, 0);

	ret = soc_v1_0_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

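/* Return true if the offset falls inside one of the per-XCC gfxdec register
 * ranges defined above.
 */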
bool soc_v1_0_normalize_xcc_reg_range(uint32_t reg)
{
	return ((reg >= XCC_REG_RANGE_0_LOW) && (reg < XCC_REG_RANGE_0_HIGH)) ||
	       ((reg >= XCC_REG_RANGE_1_LOW) && (reg < XCC_REG_RANGE_1_HIGH));
}

uint32_t soc_v1_0_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC register, keep only the lower 16 bits so the access
	 * targets the local XCC.
	 */
	if (soc_v1_0_normalize_xcc_reg_range(normalized_reg))
		return normalized_reg;
	else
		return reg;
}
863