/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

#define XCP_INST_MASK(num_inst, xcp_id)                                        \
	((num_inst) ? GENMASK((num_inst) - 1, 0) << ((xcp_id) * (num_inst)) : 0)
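/*
 * Worked example (illustrative): with num_inst = 2 and xcp_id = 1,
 * GENMASK(1, 0) = 0x3 shifted left by 1 * 2 gives 0xc, i.e. instances
 * 2 and 3 belong to partition 1; num_inst = 0 yields an empty mask.
 */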

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
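	/*
	 * Each SDMA instance gets half of sdma_doorbell_range (10 doorbell
	 * slots with the value above), laid out back to back from the
	 * LAYOUT1 sDMA engine base.
	 */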
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

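/*
 * Bind a ring to the partition whose IP instance mask contains the ring's
 * hardware instance (compute/KIQ rings are looked up by XCC id, all other
 * rings by their 'me' instance; see the caller below).
 */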
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
			     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s: [%d] gpu_sched[%d][%d] = %d", ring->name,
			sel_xcp_id, ring->funcs->type,
			ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX mode in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

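/*
 * Pick schedulers for a new job submission. A file (fpriv) that has not yet
 * been bound to a partition is attached to the partition with the smallest
 * ref_cnt, which balances contexts across partitions; later submissions
 * from the same fpriv reuse that binding.
 */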
static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
					 enum amd_hw_ip_block_type block,
					 int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs. */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

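/*
 * Translate a mask of logical instances into a mask of device (physical)
 * instances, one bit at a time. Illustrative example: if logical GC
 * instances {0, 1} map to device instances {0, 2}, a logical mask of 0x3
 * becomes a device mask of 0x5.
 */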
static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
					 enum amd_hw_ip_block_type block,
					 uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates a cross-AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing; bits[34:32] will be zero */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
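	/*
	 * Worked example (illustrative): ext_id = 2 yields
	 * (2ULL << 32) | (1ULL << 34) = 0x6_0000_0000, i.e. a cross-AID
	 * access targeting AID 2.
	 */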

	return ext_offset;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if (num_xcc_per_xcp && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported: %d, derived: %d",
				mode, derv_mode);
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				    enum AMDGPU_XCP_IP_BLOCK ip_id,
				    struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

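	/*
	 * Illustrative example: with 8 CPX partitions and 4 VCN instances,
	 * num_shared_vcn = 2, i.e. each VCN instance serves two partitions;
	 * xcp_id / num_shared_vcn in the VCN case below selects it.
	 */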
	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO: Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcp = 1;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcp = 2;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcp = 3;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcp = 4;
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			    BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		if (i == AMDGPU_XCP_RES_JPEG)
			xcp_cfg->xcp_res[i].num_inst *=
				adev->jpeg.num_jpeg_rings;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

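/*
 * Derive the default compute partition mode from the memory (NPS)
 * configuration: a single memory partition defaults to SPX, one memory
 * partition per XCC to CPX, and the remaining combinations depend on
 * whether the part is an APU.
 */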
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

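/*
 * Switch the compute partition mode. The sequence is: resolve/validate the
 * requested mode, lock and tear down KFD if it is active, reprogram the
 * XCC grouping, rebuild the XCP bookkeeping for the new partition count,
 * then bring KFD back up and refresh the available-mode mask.
 */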
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* Validation of memory/spatial partition modes has already been done */
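	/*
	 * Illustrative example (hypothetical config): with 2 XCCs per XCP
	 * and 2 XCPs per memory partition, xcc_id 5 falls in XCP 2 and
	 * therefore memory partition 1.
	 */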
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now;
	 * check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     enum AMDGPU_XCP_IP_BLOCK ip_id,
				     struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list =
		&aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

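	/* Partition mode switching is not exposed to SR-IOV VFs */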
	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	__aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

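	/*
	 * Build the AID mask from the SDMA instance mask: AID 0 is always
	 * present, and each further AID is counted as present when all four
	 * of its SDMA instances, or one full pair (0x3 or 0xc), survive.
	 */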
	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smnreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
};
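
/*
 * NUM_PCIE_SMN_REGS must equal the total register count described by
 * pcie_reg_addrs above: 1 + 1 + 6 + 6 = 14. The same invariant holds for
 * the XGMI, WAFL and USR tables that follow.
 */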

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
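/*
 * Worked example (illustrative): XGMI_LINK_REG(smnreg_0x11A00180, 1)
 * sets bit 20 for link 1 and yields 0x11B00180.
 */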

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must be able to hold all XGMI instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}
1254