/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
#include "amdgpu_ip.h"

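/*
 * Program the LAYOUT1 doorbell index assignments used by this SOC: KIQ,
 * MEC ring, user queues, per-XCC range, SDMA engines, IH and VCN.
 */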
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates a cross-AID access
 *   bit[33:32]: target AID id
 * AID ids range from 0 to 3, as the maximum number of AIDs is 4.
 */
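/*
 * For example, ext_id 2 encodes to (1ULL << 34) | (2ULL << 32): the
 * cross-AID bit plus target AID 2, which callers add to the SMN register
 * address.
 */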
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing and bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

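/*
 * Derive the current compute partition mode from the total XCC count and
 * the XCCs-per-XCP value reported by the GFX IP.
 */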
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

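/*
 * Query the compute partition mode. VFs use the derived mode directly; on
 * bare metal the mode reported by NBIO is cross-checked against the
 * derived one and a mismatch is warned about.
 */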
static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode) {
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported : %d derived : %d",
				mode, derv_mode);
			if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
				amdgpu_device_bus_status_check(adev);
		}
	}

	return mode;
}

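/* Number of XCCs per partition for the requested partition mode. */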
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

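/*
 * Fill in the instance mask and IP functions of one IP block for the
 * partition identified by xcp_id. SDMA and VCN instances are spread evenly
 * across partitions; a VCN instance may be shared by several partitions
 * when there are more partitions than VCN instances.
 */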
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				    enum AMDGPU_XCP_IP_BLOCK ip_id,
				    struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

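/*
 * For a given partition mode, report how many partitions it creates and
 * which NPS (memory partition) modes it is compatible with.
 */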
static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

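/*
 * Populate xcp_cfg with the per-partition resource split (XCC, SDMA, VCN
 * decode and JPEG, the latter counted in rings) and the compatible NPS
 * modes for the requested partition mode. A resource with fewer instances
 * than partitions is shared between partitions.
 */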
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

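/*
 * Pick a compute partition mode from the number of memory partitions: one
 * memory partition maps to SPX, one per XCC maps to CPX, and the remaining
 * cases differ between APU and dGPU configurations.
 */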
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

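/*
 * Check whether the requested compute partition mode is possible with the
 * current XCC count and the active memory (NPS) partition mode.
 */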
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					       &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}

	return false;
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

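/*
 * Switch to the requested compute partition mode (or to an automatically
 * selected one), locking KFD around the transition when required, and
 * reinitialize the XCP bookkeeping for the new number of partitions.
 */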
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
		!adev->in_suspend)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

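/*
 * Map an XCC to its memory partition id: convert the XCC id to a partition
 * id, then divide by the number of partitions per memory partition.
 */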
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     enum AMDGPU_XCP_IP_BLOCK ip_id,
				     struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
};

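/*
 * Register the XCP manager callbacks (the partition switch callback is not
 * exposed under SR-IOV) and derive the set of supported partition modes.
 */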
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

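/*
 * SOC-level configuration: derive the AID mask from the populated SDMA
 * instances, set the per-AID instance counts for SDMA/VCN/JPEG, then bring
 * up the XCP manager and the IP instance mapping.
 */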
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

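/*
 * Register state snapshot helpers: read a single SMN register, either
 * locally or on a remote AID via the extended SMN encoding, into an
 * amdgpu_smn_reg_data entry.
 */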
static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

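/*
 * A run of registers to capture: start SMN address, number of registers
 * and the address increment between consecutive registers.
 */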
struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

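/*
 * Dump the PCIe register state into the caller-provided buffer: the SMN
 * register runs listed above plus device/link status and AER error status
 * from the upstream bridge config space.
 */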
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

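/*
 * Dump the XGMI register state: for every populated AID, each of the two
 * XGMI link instances is captured by reading the SMN runs above through
 * the extended (cross-AID) SMN path.
 */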
static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must be large enough for all xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

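/*
 * Dump the USR register state for every populated AID, selecting either
 * the USR or the USR_1 register set based on the requested state type.
 */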
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

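/*
 * Entry point for register state dumps: dispatch to the PCIE, XGMI, WAFL
 * or USR/USR_1 reader based on the requested state type and return the
 * number of bytes written to the buffer.
 */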
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}
984