/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "soc_v1_0.h"
#include "amdgpu_ip.h"
#include "amdgpu_imu.h"
#include "gfxhub_v12_1.h"
#include "sdma_v7_1.h"
#include "gfx_v12_1.h"

#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "mp/mp_15_0_8_offset.h"

#define XCC_REG_RANGE_0_LOW  0x1260     /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3C00     /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper bound */
#define NORMALIZE_XCC_REG_OFFSET(offset) \
	((offset) & 0xFFFF)

/* Initialize the doorbells used by amdgpu, including multimedia;
 * KFD can use all the rest of the 2M doorbell BAR.
 */
static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_SOC_V1_0_DOORBELL_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MEC_RING_START;
	adev->doorbell_index.mes_ring0 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_SOC_V1_0_DOORBELL_MES_RING1;

	adev->doorbell_index.userqueue_start = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
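	/* Each SDMA engine gets half of the 20-slot range, i.e. the
	 * engines are spaced 10 doorbell indices apart.
	 */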
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_SOC_V1_0_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_SOC_V1_0_DOORBELL_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_SOC_V1_0_DOORBELL_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_SOC_V1_0_DOORBELL_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_SOC_V1_0_DOORBELL_MAX_ASSIGNMENT << 1;
}

/* Fixed pattern for the upper 32 bits of extended SMN addressing.
 *   bit[47:40]: socket ID
 *   bit[39:34]: die ID
 *   bit[33:32]: routing flags selecting local/remote die and socket
 * The ext_id is composed of the socket ID and die ID:
 *   ext_id = (socket_id << 6) | die_id
 */
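/* Worked example (following the encoding below): ext_id 0x42 decodes to
 * socket_id 1, die_id 2, so the returned upper offset is
 * (1ULL << 40) | (2ULL << 34) | (3ULL << 32) = 0x10B00000000.
 */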
u64 soc_v1_0_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;
	int socket_id, die_id;

	/* local die routing for MID0 on the local socket */
	if (ext_id == 0)
		return 0;

	die_id = ext_id & 0x3;
	socket_id = (ext_id >> 6) & 0xff;

	/* Initiated from the host, accesses to non-MID0 are cross-die traffic */
	if (socket_id == 0)
		ext_offset = ((u64)die_id << 34) | (1ULL << 32);
	else if (die_id != 0)
		ext_offset = ((u64)socket_id << 40) | ((u64)die_id << 34) |
				(3ULL << 32);
	else
		ext_offset = ((u64)socket_id << 40) | (1ULL << 33);

	return ext_offset;
}

static u32 soc_v1_0_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc_v1_0_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

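/**
 * soc_v1_0_grbm_select - steer GRBM register accesses to one queue
 * @adev: amdgpu device pointer
 * @me: microengine ID
 * @pipe: pipe ID
 * @queue: queue ID
 * @vmid: VMID
 * @xcc_id: XCC instance to program
 *
 * Programs GRBM_GFX_CNTL (through the RLC shadow path) so that subsequent
 * per-queue register accesses target the selected me/pipe/queue/vmid.
 */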
void soc_v1_0_grbm_select(struct amdgpu_device *adev,
			  u32 me, u32 pipe,
			  u32 queue, u32 vmid,
			  int xcc_id)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static struct soc15_allowed_register_entry soc_v1_0_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS3) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0) },
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1) },
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS) },
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG_1) },
};

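/* Read a register through the GRBM index. If either se_num or sh_num is
 * not the broadcast value (0xffffffff), temporarily steer GRBM to that
 * SE/SH under grbm_idx_mutex and restore broadcast afterwards.
 */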
static uint32_t soc_v1_0_read_indexed_register(struct amdgpu_device *adev,
					       u32 se_num,
					       u32 sh_num,
					       u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc_v1_0_get_register_value(struct amdgpu_device *adev,
					    bool indexed, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc_v1_0_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		/* Prefer the cached GB_ADDR_CONFIG value when available */
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG_1) &&
		    adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc_v1_0_read_register(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num,
				  u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc_v1_0_allowed_read_registers); i++) {
		en = &soc_v1_0_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc_v1_0_get_register_value(adev, en->grbm_indexed,
						     se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
	default:
		return true;
	}
}

static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm the sys driver and
	 * sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
	return 0;
}

static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc_v1_0_read_register,
	.get_config_memsize = &soc_v1_0_get_config_memsize,
	.get_xclk = &soc_v1_0_get_xclk,
	.need_full_reset = &soc_v1_0_need_full_reset,
	.init_doorbell_index = &soc_v1_0_doorbell_index_init,
	.need_reset_on_init = &soc_v1_0_need_reset_on_init,
	.encode_ext_smn_addressing = &soc_v1_0_encode_ext_smn_addressing,
	.reset = &soc_v1_0_asic_reset,
};

static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->reg.pcie.rreg = &amdgpu_device_indirect_rreg;
	adev->reg.pcie.wreg = &amdgpu_device_indirect_wreg;
	adev->reg.pcie.rreg_ext = &amdgpu_device_indirect_rreg_ext;
	adev->reg.pcie.wreg_ext = &amdgpu_device_indirect_wreg_ext;
	adev->reg.pcie.rreg64 = &amdgpu_device_indirect_rreg64;
	adev->reg.pcie.wreg64 = &amdgpu_device_indirect_wreg64;
	adev->reg.pcie.port_rreg = &amdgpu_device_pcie_port_rreg;
	adev->reg.pcie.port_wreg = &amdgpu_device_pcie_port_wreg;
	adev->reg.pcie.rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
	adev->reg.pcie.wreg64_ext = &amdgpu_device_indirect_wreg64_ext;

	adev->asic_funcs = &soc_v1_0_asic_funcs;

	adev->rev_id = amdgpu_device_get_rev_id(adev);
	adev->external_rev_id = 0xff;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 1, 0):
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

static int soc_v1_0_common_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Enable the self-ring doorbell aperture late because the doorbell
	 * BAR aperture can change if BAR resize succeeds in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_sw_init(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int soc_v1_0_common_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* enable the doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc_v1_0_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

	return 0;
}

static int soc_v1_0_common_suspend(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_fini(ip_block);
}

static int soc_v1_0_common_resume(struct amdgpu_ip_block *ip_block)
{
	return soc_v1_0_common_hw_init(ip_block);
}

static bool soc_v1_0_common_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

static int soc_v1_0_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_clockgating_state state)
{
	return 0;
}

static int soc_v1_0_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
						 enum amd_powergating_state state)
{
	return 0;
}

static void soc_v1_0_common_get_clockgating_state(struct amdgpu_ip_block *ip_block,
						  u64 *flags)
{
}

static const struct amd_ip_funcs soc_v1_0_common_ip_funcs = {
	.name = "soc_v1_0_common",
	.early_init = soc_v1_0_common_early_init,
	.late_init = soc_v1_0_common_late_init,
	.sw_init = soc_v1_0_common_sw_init,
	.hw_init = soc_v1_0_common_hw_init,
	.hw_fini = soc_v1_0_common_hw_fini,
	.suspend = soc_v1_0_common_suspend,
	.resume = soc_v1_0_common_resume,
	.is_idle = soc_v1_0_common_is_idle,
	.set_clockgating_state = soc_v1_0_common_set_clockgating_state,
	.set_powergating_state = soc_v1_0_common_set_powergating_state,
	.get_clockgating_state = soc_v1_0_common_get_clockgating_state,
};

const struct amdgpu_ip_block_version soc_v1_0_common_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc_v1_0_common_ip_funcs,
};

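/* Derive the compute partition mode from the XCC topology: with N XCCs
 * total and n XCCs per partition, mode = N / n maps 1/2/3/4 partitions
 * to SPX/DPX/TPX/QPX, with the one-XCC-per-partition case reported as CPX.
 */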
static enum amdgpu_gfx_partition __soc_v1_0_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs &&
	    adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if (num_xcc_per_xcp && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

static int soc_v1_0_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode, mode;
	struct amdgpu_device *adev = xcp_mgr->adev;

	mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	derv_mode = __soc_v1_0_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev) || !adev->psp.funcs)
		return derv_mode;

	if (adev->nbio.funcs &&
	    adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(adev->dev,
				 "Mismatch in compute partition mode - reported : %d derived : %d",
				 mode, derv_mode);
	}

	return mode;
}

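/* Callers are expected to pass a mode already validated against the XCC
 * count; for TPX/QPX the integer division below truncates if num_xcc is
 * not a multiple of 3 or 4.
 */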
static int __soc_v1_0_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

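/* Illustrative sizing (hypothetical config): with 8 XCCs in DPX mode
 * (4 XCCs per XCP), 16 SDMA instances and 2 VCN instances, num_xcp = 2,
 * so each XCP gets DIV_ROUND_UP(16, 2) = 8 SDMA and DIV_ROUND_UP(2, 2) = 1
 * VCN instance; num_shared_vcn stays 1 since num_xcp does not exceed
 * num_vcn.
 */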
static int __soc_v1_0_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				      enum AMDGPU_XCP_IP_BLOCK ip_id,
				      struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v12_1_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v12_1_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v7_1_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

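/* Report per-XCP resource counts for a candidate mode. When a resource
 * has fewer instances than partitions (res_lt_xcp), each partition sees
 * one instance shared between num_xcp / max_res[i] partitions.
 */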
static int soc_v1_0_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				     int mode,
				     struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcp = 1;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcp = 2;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcp = 3;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcp = 4;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		/* JPEG is counted in rings rather than instances */
		if (i == AMDGPU_XCP_RES_JPEG)
			xcp_cfg->xcp_res[i].num_inst *=
				adev->jpeg.num_jpeg_rings;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

static enum amdgpu_gfx_partition __soc_v1_0_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2)
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __soc_v1_0_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
				     enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		/* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
		 * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
		 * num_compute_partitions can't be less than num_mem_partitions.
		 */
		return ((num_xcc > 1) &&
		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}
}

static void __soc_v1_0_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__soc_v1_0_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

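/* Switch the compute partition mode: resolve AUTO to a concrete mode,
 * lock KFD (outside of reset), run the pre-switch hooks, ask the IMU to
 * repartition, rebuild the XCP bookkeeping, then run the post-switch
 * hooks and unlock.
 */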
static int soc_v1_0_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __soc_v1_0_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__soc_v1_0_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __soc_v1_0_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.imu.funcs &&
	    adev->gfx.imu.funcs->switch_compute_partition) {
		ret = adev->gfx.imu.funcs->switch_compute_partition(xcp_mgr->adev,
								    num_xcc_per_xcp,
								    mode);
		if (ret)
			goto unlock;
	}
	if (adev->gfx.imu.funcs &&
	    adev->gfx.imu.funcs->init_mcm_addr_lut &&
	    amdgpu_emu_mode)
		adev->gfx.imu.funcs->init_mcm_addr_lut(adev);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__soc_v1_0_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

#ifdef HAVE_ACPI_DEV_GET_FIRST_MATCH_DEV
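/* Map an XCC to its memory partition id. Illustrative arithmetic
 * (hypothetical config): xcc_id 5 with 2 XCCs per XCP and 2 XCPs per
 * memory partition gives (5 / 2) / 2 = mem_id 1.
 */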
static int __soc_v1_0_get_xcp_mem_id(struct amdgpu_device *adev,
				     int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int soc_v1_0_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
				   struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __soc_v1_0_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}
#endif

static int soc_v1_0_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				       enum AMDGPU_XCP_IP_BLOCK ip_id,
				       struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __soc_v1_0_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs soc_v1_0_xcp_funcs = {
	.switch_partition_mode = &soc_v1_0_switch_partition_mode,
	.query_partition_mode = &soc_v1_0_query_partition_mode,
	.get_ip_details = &soc_v1_0_get_xcp_ip_details,
	.get_xcp_res_info = &soc_v1_0_get_xcp_res_info,
#ifdef HAVE_ACPI_DEV_GET_FIRST_MATCH_DEV
	.get_xcp_mem_id = &soc_v1_0_get_xcp_mem_id,
#endif
};

static int soc_v1_0_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	/* partition switching is not exposed to SR-IOV VFs */
	if (amdgpu_sriov_vf(adev))
		soc_v1_0_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE,
				  1, &soc_v1_0_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
{
	int ret, i;
	int xcc_inst_per_aid = 4;
	uint16_t xcc_mask;

	/* mark an AID present if any of its 4 XCCs is in the mask */
	xcc_mask = adev->gfx.xcc_mask;
	adev->aid_mask = 0;
	for (i = 0; xcc_mask; xcc_mask >>= xcc_inst_per_aid, i++) {
		if (xcc_mask & ((1U << xcc_inst_per_aid) - 1))
			adev->aid_mask |= (1 << i);
	}

	adev->sdma.num_inst_per_xcc = 2;
	adev->sdma.num_instances =
		NUM_XCC(adev->gfx.xcc_mask) * adev->sdma.num_inst_per_xcc;
	adev->sdma.sdma_mask =
		GENMASK(adev->sdma.num_instances - 1, 0);

	ret = soc_v1_0_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

bool soc_v1_0_normalize_xcc_reg_range(uint32_t reg)
{
	return ((reg >= XCC_REG_RANGE_0_LOW) && (reg < XCC_REG_RANGE_0_HIGH)) ||
	       ((reg >= XCC_REG_RANGE_1_LOW) && (reg < XCC_REG_RANGE_1_HIGH));
}

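/* Worked example (from the ranges above): reg 0x1A000 masks to 0xA000,
 * which lies in the gfxdec1 range, so 0xA000 is returned; reg 0x18000
 * masks to 0x8000, which lies in neither range, so the original 0x18000
 * is returned unchanged.
 */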
uint32_t soc_v1_0_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC reg, normalize it to keep only the lower 16 bits,
	 * addressing the local XCC.
	 */
	if (soc_v1_0_normalize_xcc_reg_range(normalized_reg))
		return normalized_reg;

	return reg;
}
857