xref: /linux/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
#include "amdgpu_ip.h"

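/*
 * Program the LAYOUT1 doorbell index map for this SOC. Each SDMA
 * instance is given half of the 20-slot sdma_doorbell_range, i.e.
 * 10 doorbell slots per engine.
 */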
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

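/*
 * Derive the current compute partition mode from the XCC configuration:
 * the partition count is total XCCs divided by XCCs per partition
 * (1 -> SPX, 2 -> DPX, 3 -> TPX, 4 -> QPX), with a single XCC per
 * partition reported as CPX.
 */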
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode) {
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported: %d, derived: %d\n",
				mode, derv_mode);
			if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
				amdgpu_device_bus_status_check(adev);
		}
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

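/*
 * Fill in the IP instance mask and per-IP callbacks for one partition:
 * XCCs are split evenly across partitions, SDMA and VCN instances are
 * distributed round-robin, and a single VCN instance may be shared by
 * several partitions when there are more partitions than VCN instances.
 */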
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				    enum AMDGPU_XCP_IP_BLOCK ip_id,
				    struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

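/*
 * Per-partition-mode constants: how many partitions the mode creates and
 * which NPS (memory partition) modes it can be combined with. GC 9.5.0
 * additionally allows NPS2 with QPX and CPX.
 */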
static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

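/*
 * Report per-partition resource counts for a candidate mode. When there
 * are fewer instances of a resource than partitions, the instance is
 * shared (num_shared > 1); JPEG counts are expressed in rings rather
 * than instances.
 */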
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

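/*
 * Pick a compute partition mode that matches the current memory
 * partition count: one memory partition maps to SPX, one per XCC to
 * CPX, and the intermediate cases depend on whether the device is an
 * APU.
 */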
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					       &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

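/*
 * Switch to a new compute partition mode: resolve AUTO to a concrete
 * mode, validate it, quiesce KFD if it is live, let the GFX IP regroup
 * the XCCs, then rebuild the XCP bookkeeping and refresh the set of
 * available modes.
 */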
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d\n",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d\n",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
		!adev->in_suspend)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

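/*
 * Map an XCC to its memory partition id: first convert the XCC index to
 * a partition (XCP) index, then scale by the number of partitions that
 * share one memory partition.
 */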
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     enum AMDGPU_XCP_IP_BLOCK ip_id,
				     struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

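/*
 * Derive the AID mask from the SDMA instance mask: with 4 SDMA
 * instances per AID, an AID beyond the first is considered present
 * when all four of its instances are available, or when either
 * half-pair (0x3 or 0xc) is. AID 0 is always assumed present.
 */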
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG
	 * instances are addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

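/*
 * Snapshot one SMN register through the PCIE indirect access path into
 * a reg_data entry.
 */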
static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

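/*
 * Extended SMN read: like aqua_read_smn(), but offsets the register by
 * the per-instance SMN base so the same register can be sampled on each
 * AID.
 */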
static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + amdgpu_reg_get_smn_base64(adev, XGMI_HWIP, i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smnreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
};

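/*
 * Dump PCIE state: a common header, one instance header, the SMN
 * register values listed in pcie_reg_addrs, plus link/device status and
 * AER state read from the upstream bridge's config space. Returns the
 * number of bytes written, or -EOVERFLOW if the buffer is too small.
 */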
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

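/* The XGMI link index is encoded at bit 20 of the SMN address. */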
#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

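/*
 * Dump XGMI state: iterate over each present AID and each of its two
 * XGMI links, sampling the registers in xgmi_reg_addrs per link.
 */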
static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Room for up to max_xgmi_instances of XGMI regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

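/*
 * Dump USR state: select the USR or USR_1 register set, then sample it
 * once per present AID. Note that the common header's state_type is set
 * to AMDGPU_REG_STATE_TYPE_USR for both variants.
 */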
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}
965