xref: /linux/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c (revision de848da12f752170c2ebe114804a985314fd5a6a)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "soc15.h"
25 
26 #include "soc15_common.h"
27 #include "amdgpu_reg_state.h"
28 #include "amdgpu_xcp.h"
29 #include "gfx_v9_4_3.h"
30 #include "gfxhub_v1_2.h"
31 #include "sdma_v4_4_2.h"
32 
33 #define XCP_INST_MASK(num_inst, xcp_id)                                        \
34 	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
35 
36 #define AMDGPU_XCP_OPS_KFD	(1 << 0)
37 
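/*
 * Program the DOORBELL_LAYOUT1 index assignments (KIQ, MEC ring 0, user
 * queues, SDMA, IH, VCN). The SDMA doorbell range is 20 entries and each
 * SDMA engine starts half a range (10 doorbells) after the previous one.
 */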
38 void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
39 {
40 	int i;
41 
42 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
43 
44 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
45 
46 	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
47 	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
48 	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
49 
50 	adev->doorbell_index.sdma_doorbell_range = 20;
51 	for (i = 0; i < adev->sdma.num_instances; i++)
52 		adev->doorbell_index.sdma_engine[i] =
53 			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
54 			i * (adev->doorbell_index.sdma_doorbell_range >> 1);
55 
56 	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
57 	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
58 
59 	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
60 	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
61 
62 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
63 }
64 
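/*
 * True when there are more compute partitions (XCPs) than VCN instances,
 * i.e. a VCN instance has to be shared by more than one partition.
 */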
65 static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
66 {
67 	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
68 }
69 
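/*
 * Assign a ring to the partition owning its HW instance: translate the ring
 * type to an XCP IP block, build an instance mask from inst_idx, and pick the
 * first XCP whose inst_mask for that IP block contains the instance. Rings
 * keep AMDGPU_XCP_NO_PARTITION when partitioning is disabled.
 */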
70 static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
71 			     uint32_t inst_idx, struct amdgpu_ring *ring)
72 {
73 	int xcp_id;
74 	enum AMDGPU_XCP_IP_BLOCK ip_blk;
75 	uint32_t inst_mask;
76 
77 	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
78 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
79 		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
80 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
81 		return;
82 
83 	inst_mask = 1 << inst_idx;
84 
85 	switch (ring->funcs->type) {
86 	case AMDGPU_RING_TYPE_GFX:
87 	case AMDGPU_RING_TYPE_COMPUTE:
88 	case AMDGPU_RING_TYPE_KIQ:
89 		ip_blk = AMDGPU_XCP_GFX;
90 		break;
91 	case AMDGPU_RING_TYPE_SDMA:
92 		ip_blk = AMDGPU_XCP_SDMA;
93 		break;
94 	case AMDGPU_RING_TYPE_VCN_ENC:
95 	case AMDGPU_RING_TYPE_VCN_JPEG:
96 		ip_blk = AMDGPU_XCP_VCN;
97 		if (aqua_vanjaram_xcp_vcn_shared(adev))
98 			inst_mask = 1 << (inst_idx * 2);
99 		break;
100 	default:
101 		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
102 		return;
103 	}
104 
105 	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
106 		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
107 			ring->xcp_id = xcp_id;
108 			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
109 				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
110 			break;
111 		}
112 	}
113 }
114 
115 static void aqua_vanjaram_xcp_gpu_sched_update(
116 		struct amdgpu_device *adev,
117 		struct amdgpu_ring *ring,
118 		unsigned int sel_xcp_id)
119 {
120 	unsigned int *num_gpu_sched;
121 
122 	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
123 			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
124 	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
125 			.sched[(*num_gpu_sched)++] = &ring->sched;
126 	DRM_DEBUG("%s: [%d] gpu_sched[%d][%d] = %d", ring->name,
127 			sel_xcp_id, ring->funcs->type,
128 			ring->hw_prio, *num_gpu_sched);
129 }
130 
131 static int aqua_vanjaram_xcp_sched_list_update(
132 		struct amdgpu_device *adev)
133 {
134 	struct amdgpu_ring *ring;
135 	int i;
136 
137 	for (i = 0; i < MAX_XCP; i++) {
138 		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
139 		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
140 	}
141 
142 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
143 		return 0;
144 
145 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
146 		ring = adev->rings[i];
147 		if (!ring || !ring->sched.ready || ring->no_scheduler)
148 			continue;
149 
150 		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
151 
152 		/* VCN may be shared by two partitions under CPX MODE in certain
153 		 * configs.
154 		 */
155 		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
156 		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
157 		    aqua_vanjaram_xcp_vcn_shared(adev))
158 			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
159 	}
160 
161 	return 0;
162 }
163 
164 static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
165 {
166 	int i;
167 
168 	for (i = 0; i < adev->num_rings; i++) {
169 		struct amdgpu_ring *ring = adev->rings[i];
170 
171 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
172 			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
173 			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
174 		else
175 			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
176 	}
177 
178 	return aqua_vanjaram_xcp_sched_list_update(adev);
179 }
180 
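/*
 * Select the scheduler list for a submission: when the file has no partition
 * assigned yet, pick the XCP with the lowest reference count, then return
 * that partition's schedulers for the requested IP/priority and bump its
 * reference count.
 */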
181 static int aqua_vanjaram_select_scheds(
182 		struct amdgpu_device *adev,
183 		u32 hw_ip,
184 		u32 hw_prio,
185 		struct amdgpu_fpriv *fpriv,
186 		unsigned int *num_scheds,
187 		struct drm_gpu_scheduler ***scheds)
188 {
189 	u32 sel_xcp_id;
190 	int i;
191 
192 	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
193 		u32 least_ref_cnt = ~0;
194 
195 		fpriv->xcp_id = 0;
196 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
197 			u32 total_ref_cnt;
198 
199 			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
200 			if (total_ref_cnt < least_ref_cnt) {
201 				fpriv->xcp_id = i;
202 				least_ref_cnt = total_ref_cnt;
203 			}
204 		}
205 	}
206 	sel_xcp_id = fpriv->xcp_id;
207 
208 	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
209 		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
210 		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
211 		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
212 		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
213 	} else {
214 		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
215 		return -ENOENT;
216 	}
217 
218 	return 0;
219 }
220 
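/*
 * Translate a logical IP instance to its device (physical) instance using the
 * map built by aqua_vanjaram_ip_map_init(). Only GC, SDMA and VCN/JPEG need
 * translation; all other IPs are assumed to map 1:1.
 */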
221 static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
222 					 enum amd_hw_ip_block_type block,
223 					 int8_t inst)
224 {
225 	int8_t dev_inst;
226 
227 	switch (block) {
228 	case GC_HWIP:
229 	case SDMA0_HWIP:
230 	/* Both JPEG and VCN, as JPEG is only an alias of VCN */
231 	case VCN_HWIP:
232 		dev_inst = adev->ip_map.dev_inst[block][inst];
233 		break;
234 	default:
235 		/* For the rest of the IPs, no lookup is required.
236 		 * Assume 'logical instance == physical instance' for all configs. */
237 		dev_inst = inst;
238 		break;
239 	}
240 
241 	return dev_inst;
242 }
243 
244 static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
245 					 enum amd_hw_ip_block_type block,
246 					 uint32_t mask)
247 {
248 	uint32_t dev_mask = 0;
249 	int8_t log_inst, dev_inst;
250 
251 	while (mask) {
252 		log_inst = ffs(mask) - 1;
253 		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
254 		dev_mask |= (1 << dev_inst);
255 		mask &= ~(1 << log_inst);
256 	}
257 
258 	return dev_mask;
259 }
260 
261 static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
262 					  enum amd_hw_ip_block_type ip_block,
263 					  uint32_t inst_mask)
264 {
265 	int l = 0, i;
266 
267 	while (inst_mask) {
268 		i = ffs(inst_mask) - 1;
269 		adev->ip_map.dev_inst[ip_block][l++] = i;
270 		inst_mask &= ~(1 << i);
271 	}
272 	for (; l < HWIP_MAX_INSTANCE; l++)
273 		adev->ip_map.dev_inst[ip_block][l] = -1;
274 }
275 
276 void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
277 {
278 	u32 ip_map[][2] = {
279 		{ GC_HWIP, adev->gfx.xcc_mask },
280 		{ SDMA0_HWIP, adev->sdma.sdma_mask },
281 		{ VCN_HWIP, adev->vcn.inst_mask },
282 	};
283 	int i;
284 
285 	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
286 		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
287 
288 	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
289 	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
290 }
291 
292 /* Fixed pattern for SMN addressing on different AIDs:
293  *   bit[34]: indicates cross-AID access
294  *   bit[33:32]: indicate the target AID id
295  * The AID id range is 0 ~ 3 as the maximum number of AIDs is 4.
296  */
297 u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
298 {
299 	u64 ext_offset;
300 
301 	/* local routing and bit[34:32] will be zeros */
302 	if (ext_id == 0)
303 		return 0;
304 
305 	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
306 	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
307 
308 	return ext_offset;
309 }
310 
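/*
 * Derive the current compute partition mode from the XCC configuration
 * reported by the GFX IP: one XCC per XCP means CPX, otherwise the mode
 * follows the number of partitions (total XCCs / XCCs per XCP).
 */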
311 static enum amdgpu_gfx_partition
312 __aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
313 {
314 	struct amdgpu_device *adev = xcp_mgr->adev;
315 	int num_xcc, num_xcc_per_xcp = 0, mode = 0;
316 
317 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
318 	if (adev->gfx.funcs->get_xccs_per_xcp)
319 		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
320 	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
321 		mode = num_xcc / num_xcc_per_xcp;
322 
323 	if (num_xcc_per_xcp == 1)
324 		return AMDGPU_CPX_PARTITION_MODE;
325 
326 	switch (mode) {
327 	case 1:
328 		return AMDGPU_SPX_PARTITION_MODE;
329 	case 2:
330 		return AMDGPU_DPX_PARTITION_MODE;
331 	case 3:
332 		return AMDGPU_TPX_PARTITION_MODE;
333 	case 4:
334 		return AMDGPU_QPX_PARTITION_MODE;
335 	default:
336 		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
337 	}
338 
339 	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
340 }
341 
342 static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
343 {
344 	enum amdgpu_gfx_partition derv_mode,
345 		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
346 	struct amdgpu_device *adev = xcp_mgr->adev;
347 
348 	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
349 
350 	if (amdgpu_sriov_vf(adev))
351 		return derv_mode;
352 
353 	if (adev->nbio.funcs->get_compute_partition_mode) {
354 		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
355 		if (mode != derv_mode)
356 			dev_warn(
357 				adev->dev,
358 				"Mismatch in compute partition mode - reported: %d, derived: %d",
359 				mode, derv_mode);
360 	}
361 
362 	return mode;
363 }
364 
365 static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
366 {
367 	int num_xcc, num_xcc_per_xcp = 0;
368 
369 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
370 
371 	switch (mode) {
372 	case AMDGPU_SPX_PARTITION_MODE:
373 		num_xcc_per_xcp = num_xcc;
374 		break;
375 	case AMDGPU_DPX_PARTITION_MODE:
376 		num_xcc_per_xcp = num_xcc / 2;
377 		break;
378 	case AMDGPU_TPX_PARTITION_MODE:
379 		num_xcc_per_xcp = num_xcc / 3;
380 		break;
381 	case AMDGPU_QPX_PARTITION_MODE:
382 		num_xcc_per_xcp = num_xcc / 4;
383 		break;
384 	case AMDGPU_CPX_PARTITION_MODE:
385 		num_xcc_per_xcp = 1;
386 		break;
387 	}
388 
389 	return num_xcc_per_xcp;
390 }
391 
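/*
 * Fill the instance mask and IP callbacks for one IP block of a partition.
 * The per-partition SDMA/VCN counts are derived from the partition mode; in
 * CPX mode each partition gets two SDMA instances and at most one VCN.
 */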
392 static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
393 				    enum AMDGPU_XCP_IP_BLOCK ip_id,
394 				    struct amdgpu_xcp_ip *ip)
395 {
396 	struct amdgpu_device *adev = xcp_mgr->adev;
397 	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
398 	int num_sdma, num_vcn;
399 
400 	num_sdma = adev->sdma.num_instances;
401 	num_vcn = adev->vcn.num_vcn_inst;
402 
403 	switch (xcp_mgr->mode) {
404 	case AMDGPU_SPX_PARTITION_MODE:
405 		num_sdma_xcp = num_sdma;
406 		num_vcn_xcp = num_vcn;
407 		break;
408 	case AMDGPU_DPX_PARTITION_MODE:
409 		num_sdma_xcp = num_sdma / 2;
410 		num_vcn_xcp = num_vcn / 2;
411 		break;
412 	case AMDGPU_TPX_PARTITION_MODE:
413 		num_sdma_xcp = num_sdma / 3;
414 		num_vcn_xcp = num_vcn / 3;
415 		break;
416 	case AMDGPU_QPX_PARTITION_MODE:
417 		num_sdma_xcp = num_sdma / 4;
418 		num_vcn_xcp = num_vcn / 4;
419 		break;
420 	case AMDGPU_CPX_PARTITION_MODE:
421 		num_sdma_xcp = 2;
422 		num_vcn_xcp = num_vcn ? 1 : 0;
423 		break;
424 	default:
425 		return -EINVAL;
426 	}
427 
428 	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
429 
430 	switch (ip_id) {
431 	case AMDGPU_XCP_GFXHUB:
432 		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
433 		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
434 		break;
435 	case AMDGPU_XCP_GFX:
436 		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
437 		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
438 		break;
439 	case AMDGPU_XCP_SDMA:
440 		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
441 		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
442 		break;
443 	case AMDGPU_XCP_VCN:
444 		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
445 		/* TODO : Assign IP funcs */
446 		break;
447 	default:
448 		return -EINVAL;
449 	}
450 
451 	ip->ip_id = ip_id;
452 
453 	return 0;
454 }
455 
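/*
 * Pick a compute partition mode compatible with the current number of memory
 * partitions, or return unknown when no compatible mode exists.
 */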
456 static enum amdgpu_gfx_partition
457 __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
458 {
459 	struct amdgpu_device *adev = xcp_mgr->adev;
460 	int num_xcc;
461 
462 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
463 
464 	if (adev->gmc.num_mem_partitions == 1)
465 		return AMDGPU_SPX_PARTITION_MODE;
466 
467 	if (adev->gmc.num_mem_partitions == num_xcc)
468 		return AMDGPU_CPX_PARTITION_MODE;
469 
470 	if (adev->gmc.num_mem_partitions == num_xcc / 2)
471 		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
472 						    AMDGPU_CPX_PARTITION_MODE;
473 
474 	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
475 		return AMDGPU_DPX_PARTITION_MODE;
476 
477 	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
478 }
479 
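/*
 * Check whether the requested compute partition mode is compatible with the
 * current memory partition count and the number of available XCCs.
 */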
480 static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
481 					  enum amdgpu_gfx_partition mode)
482 {
483 	struct amdgpu_device *adev = xcp_mgr->adev;
484 	int num_xcc, num_xccs_per_xcp;
485 
486 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
487 	switch (mode) {
488 	case AMDGPU_SPX_PARTITION_MODE:
489 		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
490 	case AMDGPU_DPX_PARTITION_MODE:
491 		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
492 	case AMDGPU_TPX_PARTITION_MODE:
493 		return (adev->gmc.num_mem_partitions == 1 ||
494 			adev->gmc.num_mem_partitions == 3) &&
495 		       ((num_xcc % 3) == 0);
496 	case AMDGPU_QPX_PARTITION_MODE:
497 		num_xccs_per_xcp = num_xcc / 4;
498 		return (adev->gmc.num_mem_partitions == 1 ||
499 			adev->gmc.num_mem_partitions == 4) &&
500 		       (num_xccs_per_xcp >= 2);
501 	case AMDGPU_CPX_PARTITION_MODE:
502 		return ((num_xcc > 1) &&
503 		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
504 		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
505 	default:
506 		return false;
507 	}
508 
509 	return false;
510 }
511 
512 static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
513 {
514 	/* TODO:
515 	 * Stop user queues and threads, and make sure GPU is empty of work.
516 	 */
517 
518 	if (flags & AMDGPU_XCP_OPS_KFD)
519 		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
520 
521 	return 0;
522 }
523 
524 static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
525 {
526 	int ret = 0;
527 
528 	if (flags & AMDGPU_XCP_OPS_KFD) {
529 		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
530 		amdgpu_amdkfd_device_init(xcp_mgr->adev);
531 		/* If KFD init failed, return failure */
532 		if (!xcp_mgr->adev->kfd.init_complete)
533 			ret = -EIO;
534 	}
535 
536 	return ret;
537 }
538 
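/*
 * Switch to a new compute partition mode. When KFD is active it is locked and
 * torn down before the switch and re-initialized afterwards; the GFX IP
 * callback performs the actual repartitioning and the XCP manager is
 * re-initialized with the resulting number of partitions.
 */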
539 static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
540 					       int mode, int *num_xcps)
541 {
542 	int num_xcc_per_xcp, num_xcc, ret;
543 	struct amdgpu_device *adev;
544 	u32 flags = 0;
545 
546 	adev = xcp_mgr->adev;
547 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
548 
549 	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
550 		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
551 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
552 			dev_err(adev->dev,
553 				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
554 				adev->gmc.num_mem_partitions);
555 			return -EINVAL;
556 		}
557 	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
558 		dev_err(adev->dev,
559 			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
560 			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
561 		return -EINVAL;
562 	}
563 
564 	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
565 		flags |= AMDGPU_XCP_OPS_KFD;
566 
567 	if (flags & AMDGPU_XCP_OPS_KFD) {
568 		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
569 		if (ret)
570 			goto out;
571 	}
572 
573 	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
574 	if (ret)
575 		goto unlock;
576 
577 	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
578 	if (adev->gfx.funcs->switch_partition_mode)
579 		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
580 						       num_xcc_per_xcp);
581 
582 	/* Init info about new xcps */
583 	*num_xcps = num_xcc / num_xcc_per_xcp;
584 	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
585 
586 	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
587 unlock:
588 	if (flags & AMDGPU_XCP_OPS_KFD)
589 		amdgpu_amdkfd_unlock_kfd(adev);
590 out:
591 	return ret;
592 }
593 
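/*
 * Derive the memory partition id for an XCC: the XCC index is first mapped to
 * its XCP, and the XCP is then mapped to the memory partition backing it
 * (several XCPs may share one memory partition).
 */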
594 static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
595 					  int xcc_id, uint8_t *mem_id)
596 {
597 	/* memory/spatial modes validation check is already done */
598 	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
599 	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
600 
601 	return 0;
602 }
603 
604 static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
605 					struct amdgpu_xcp *xcp, uint8_t *mem_id)
606 {
607 	struct amdgpu_numa_info numa_info;
608 	struct amdgpu_device *adev;
609 	uint32_t xcc_mask;
610 	int r, i, xcc_id;
611 
612 	adev = xcp_mgr->adev;
613 	/* TODO: BIOS is not returning the right info now
614 	 * Check on this later
615 	 */
616 	/*
617 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
618 		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
619 	*/
620 	if (adev->gmc.num_mem_partitions == 1) {
621 		/* Only one range */
622 		*mem_id = 0;
623 		return 0;
624 	}
625 
626 	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
627 	if (r || !xcc_mask)
628 		return -EINVAL;
629 
630 	xcc_id = ffs(xcc_mask) - 1;
631 	if (!adev->gmc.is_app_apu)
632 		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
633 
634 	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
635 
636 	if (r)
637 		return r;
638 
639 	r = -EINVAL;
640 	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
641 		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
642 			*mem_id = i;
643 			r = 0;
644 			break;
645 		}
646 	}
647 
648 	return r;
649 }
650 
651 static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
652 				     enum AMDGPU_XCP_IP_BLOCK ip_id,
653 				     struct amdgpu_xcp_ip *ip)
654 {
655 	if (!ip)
656 		return -EINVAL;
657 
658 	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
659 }
660 
661 struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
662 	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
663 	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
664 	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
665 	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
666 	.select_scheds = &aqua_vanjaram_select_scheds,
667 	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
668 };
669 
670 static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
671 {
672 	int ret;
673 
674 	if (amdgpu_sriov_vf(adev))
675 		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
676 
677 	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
678 				  &aqua_vanjaram_xcp_funcs);
679 	if (ret)
680 		return ret;
681 
682 	/* TODO: Default memory node affinity init */
683 
684 	return ret;
685 }
686 
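/*
 * Early SOC configuration: derive the AID mask from the populated SDMA
 * instances (4 per AID), set the VCN/JPEG instance counts from their instance
 * masks, then bring up the XCP manager and the logical-to-device IP map.
 */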
687 int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
688 {
689 	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
690 	int ret, i;
691 
692 	/* generally 1 AID supports 4 SDMA instances */
693 	adev->sdma.num_inst_per_aid = 4;
694 	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
695 
696 	adev->aid_mask = i = 1;
697 	inst_mask >>= adev->sdma.num_inst_per_aid;
698 
699 	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
700 	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
701 		avail_inst = inst_mask & mask;
702 		if (avail_inst == mask || avail_inst == 0x3 ||
703 		    avail_inst == 0xc)
704 			adev->aid_mask |= (1 << i);
705 	}
706 
707 	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
708 	 * addressed based on logical instance ids.
709 	 */
710 	adev->vcn.harvest_config = 0;
711 	adev->vcn.num_inst_per_aid = 1;
712 	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
713 	adev->jpeg.harvest_config = 0;
714 	adev->jpeg.num_inst_per_aid = 1;
715 	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
716 
717 	ret = aqua_vanjaram_xcp_mgr_init(adev);
718 	if (ret)
719 		return ret;
720 
721 	aqua_vanjaram_ip_map_init(adev);
722 
723 	return 0;
724 }
725 
726 static void aqua_read_smn(struct amdgpu_device *adev,
727 			  struct amdgpu_smn_reg_data *regdata,
728 			  uint64_t smn_addr)
729 {
730 	regdata->addr = smn_addr;
731 	regdata->value = RREG32_PCIE(smn_addr);
732 }
733 
734 struct aqua_reg_list {
735 	uint64_t start_addr;
736 	uint32_t num_regs;
737 	uint32_t incrx;
738 };
739 
740 #define DW_ADDR_INCR	4
741 
742 static void aqua_read_smn_ext(struct amdgpu_device *adev,
743 			      struct amdgpu_smn_reg_data *regdata,
744 			      uint64_t smn_addr, int i)
745 {
746 	regdata->addr =
747 		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
748 	regdata->value = RREG32_PCIE_EXT(regdata->addr);
749 }
750 
751 #define smnreg_0x1A340218	0x1A340218
752 #define smnreg_0x1A3402E4	0x1A3402E4
753 #define smnreg_0x1A340294	0x1A340294
754 #define smnreg_0x1A380088	0x1A380088
755 
756 #define NUM_PCIE_SMN_REGS	14
757 
758 static struct aqua_reg_list pcie_reg_addrs[] = {
759 	{ smnreg_0x1A340218, 1, 0 },
760 	{ smnreg_0x1A3402E4, 1, 0 },
761 	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
762 	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
763 };
764 
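/*
 * Snapshot PCIe state into the caller's buffer: the SMN register list above,
 * the upstream port's device/link status and, when AER is present, its
 * correctable/uncorrectable error status.
 */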
765 static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
766 					     void *buf, size_t max_size)
767 {
768 	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
769 	uint32_t start_addr, incrx, num_regs, szbuf;
770 	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
771 	struct amdgpu_smn_reg_data *reg_data;
772 	struct pci_dev *us_pdev, *ds_pdev;
773 	int aer_cap, r, n;
774 
775 	if (!buf || !max_size)
776 		return -EINVAL;
777 
778 	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;
779 
780 	szbuf = sizeof(*pcie_reg_state) +
781 		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
782 	/* Only one instance of pcie regs */
783 	if (max_size < szbuf)
784 		return -EOVERFLOW;
785 
786 	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
787 						     sizeof(*pcie_reg_state));
788 	pcie_regs->inst_header.instance = 0;
789 	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
790 	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;
791 
792 	reg_data = pcie_regs->smn_reg_values;
793 
794 	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
795 		start_addr = pcie_reg_addrs[r].start_addr;
796 		incrx = pcie_reg_addrs[r].incrx;
797 		num_regs = pcie_reg_addrs[r].num_regs;
798 		for (n = 0; n < num_regs; n++) {
799 			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
800 			++reg_data;
801 		}
802 	}
803 
804 	ds_pdev = pci_upstream_bridge(adev->pdev);
805 	us_pdev = pci_upstream_bridge(ds_pdev);
806 
807 	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
808 				  &pcie_regs->device_status);
809 	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
810 				  &pcie_regs->link_status);
811 
812 	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
813 	if (aer_cap) {
814 		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
815 				      &pcie_regs->pcie_corr_err_status);
816 		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
817 				      &pcie_regs->pcie_uncorr_err_status);
818 	}
819 
820 	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
821 			      &pcie_regs->sub_bus_number_latency);
822 
823 	pcie_reg_state->common_header.structure_size = szbuf;
824 	pcie_reg_state->common_header.format_revision = 1;
825 	pcie_reg_state->common_header.content_revision = 0;
826 	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
827 	pcie_reg_state->common_header.num_instances = 1;
828 
829 	return pcie_reg_state->common_header.structure_size;
830 }
831 
832 #define smnreg_0x11A00050	0x11A00050
833 #define smnreg_0x11A00180	0x11A00180
834 #define smnreg_0x11A00070	0x11A00070
835 #define smnreg_0x11A00200	0x11A00200
836 #define smnreg_0x11A0020C	0x11A0020C
837 #define smnreg_0x11A00210	0x11A00210
838 #define smnreg_0x11A00108	0x11A00108
839 
840 #define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
841 
842 #define NUM_XGMI_SMN_REGS 25
843 
844 static struct aqua_reg_list xgmi_reg_addrs[] = {
845 	{ smnreg_0x11A00050, 1, 0 },
846 	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
847 	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
848 	{ smnreg_0x11A00200, 1, 0 },
849 	{ smnreg_0x11A0020C, 1, 0 },
850 	{ smnreg_0x11A00210, 1, 0 },
851 	{ smnreg_0x11A00108, 1, 0 },
852 };
853 
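/*
 * Snapshot XGMI link state: for every AID and each of its two XGMI instances,
 * read the SMN register list through the extended (cross-AID) SMN addressing.
 */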
854 static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
855 					     void *buf, size_t max_size)
856 {
857 	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
858 	uint32_t start_addr, incrx, num_regs, szbuf;
859 	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
860 	struct amdgpu_smn_reg_data *reg_data;
861 	const int max_xgmi_instances = 8;
862 	int inst = 0, i, j, r, n;
863 	const int xgmi_inst = 2;
864 	void *p;
865 
866 	if (!buf || !max_size)
867 		return -EINVAL;
868 
869 	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
870 
871 	szbuf = sizeof(*xgmi_reg_state) +
872 		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
873 				    NUM_XGMI_SMN_REGS);
874 	/* Make sure the buffer can hold all XGMI instances */
875 	if (max_size < szbuf)
876 		return -EOVERFLOW;
877 
878 	p = &xgmi_reg_state->xgmi_state_regs[0];
879 	for_each_inst(i, adev->aid_mask) {
880 		for (j = 0; j < xgmi_inst; ++j) {
881 			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
882 			xgmi_regs->inst_header.instance = inst++;
883 
884 			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
885 			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
886 
887 			reg_data = xgmi_regs->smn_reg_values;
888 
889 			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
890 				start_addr = xgmi_reg_addrs[r].start_addr;
891 				incrx = xgmi_reg_addrs[r].incrx;
892 				num_regs = xgmi_reg_addrs[r].num_regs;
893 
894 				for (n = 0; n < num_regs; n++) {
895 					aqua_read_smn_ext(
896 						adev, reg_data,
897 						XGMI_LINK_REG(start_addr, j) +
898 							n * incrx,
899 						i);
900 					++reg_data;
901 				}
902 			}
903 			p = reg_data;
904 		}
905 	}
906 
907 	xgmi_reg_state->common_header.structure_size = szbuf;
908 	xgmi_reg_state->common_header.format_revision = 1;
909 	xgmi_reg_state->common_header.content_revision = 0;
910 	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
911 	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
912 
913 	return xgmi_reg_state->common_header.structure_size;
914 }
915 
916 #define smnreg_0x11C00070	0x11C00070
917 #define smnreg_0x11C00210	0x11C00210
918 
919 static struct aqua_reg_list wafl_reg_addrs[] = {
920 	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
921 	{ smnreg_0x11C00210, 1, 0 },
922 };
923 
924 #define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
925 
926 #define NUM_WAFL_SMN_REGS 5
927 
928 static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
929 					     void *buf, size_t max_size)
930 {
931 	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
932 	uint32_t start_addr, incrx, num_regs, szbuf;
933 	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
934 	struct amdgpu_smn_reg_data *reg_data;
935 	const int max_wafl_instances = 8;
936 	int inst = 0, i, j, r, n;
937 	const int wafl_inst = 2;
938 	void *p;
939 
940 	if (!buf || !max_size)
941 		return -EINVAL;
942 
943 	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
944 
945 	szbuf = sizeof(*wafl_reg_state) +
946 		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
947 				    NUM_WAFL_SMN_REGS);
948 
949 	if (max_size < szbuf)
950 		return -EOVERFLOW;
951 
952 	p = &wafl_reg_state->wafl_state_regs[0];
953 	for_each_inst(i, adev->aid_mask) {
954 		for (j = 0; j < wafl_inst; ++j) {
955 			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
956 			wafl_regs->inst_header.instance = inst++;
957 
958 			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
959 			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
960 
961 			reg_data = wafl_regs->smn_reg_values;
962 
963 			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
964 				start_addr = wafl_reg_addrs[r].start_addr;
965 				incrx = wafl_reg_addrs[r].incrx;
966 				num_regs = wafl_reg_addrs[r].num_regs;
967 				for (n = 0; n < num_regs; n++) {
968 					aqua_read_smn_ext(
969 						adev, reg_data,
970 						WAFL_LINK_REG(start_addr, j) +
971 							n * incrx,
972 						i);
973 					++reg_data;
974 				}
975 			}
976 			p = reg_data;
977 		}
978 	}
979 
980 	wafl_reg_state->common_header.structure_size = szbuf;
981 	wafl_reg_state->common_header.format_revision = 1;
982 	wafl_reg_state->common_header.content_revision = 0;
983 	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
984 	wafl_reg_state->common_header.num_instances = max_wafl_instances;
985 
986 	return wafl_reg_state->common_header.structure_size;
987 }
988 
989 #define smnreg_0x1B311060 0x1B311060
990 #define smnreg_0x1B411060 0x1B411060
991 #define smnreg_0x1B511060 0x1B511060
992 #define smnreg_0x1B611060 0x1B611060
993 
994 #define smnreg_0x1C307120 0x1C307120
995 #define smnreg_0x1C317120 0x1C317120
996 
997 #define smnreg_0x1C320830 0x1C320830
998 #define smnreg_0x1C380830 0x1C380830
999 #define smnreg_0x1C3D0830 0x1C3D0830
1000 #define smnreg_0x1C420830 0x1C420830
1001 
1002 #define smnreg_0x1C320100 0x1C320100
1003 #define smnreg_0x1C380100 0x1C380100
1004 #define smnreg_0x1C3D0100 0x1C3D0100
1005 #define smnreg_0x1C420100 0x1C420100
1006 
1007 #define smnreg_0x1B310500 0x1B310500
1008 #define smnreg_0x1C300400 0x1C300400
1009 
1010 #define USR_CAKE_INCR 0x11000
1011 #define USR_LINK_INCR 0x100000
1012 #define USR_CP_INCR 0x10000
1013 
1014 #define NUM_USR_SMN_REGS	20
1015 
1016 static struct aqua_reg_list usr_reg_addrs[] = {
1017 	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
1018 	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
1019 	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
1020 	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
1021 	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
1022 	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
1023 };
1024 
1025 #define NUM_USR1_SMN_REGS	46
1026 static struct aqua_reg_list usr1_reg_addrs[] = {
1027 	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
1028 	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
1029 	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
1030 	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
1031 	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
1032 	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
1033 	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
1034 	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
1035 	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
1036 	{ smnreg_0x1C300400, 2, USR_CP_INCR },
1037 };
1038 
1039 static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
1040 					    void *buf, size_t max_size,
1041 					    int reg_state)
1042 {
1043 	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
1044 	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
1045 	struct amdgpu_regs_usr_v1_0 *usr_regs;
1046 	struct amdgpu_smn_reg_data *reg_data;
1047 	const int max_usr_instances = 4;
1048 	struct aqua_reg_list *reg_addrs;
1049 	int inst = 0, i, n, r, arr_size;
1050 	void *p;
1051 
1052 	if (!buf || !max_size)
1053 		return -EINVAL;
1054 
1055 	switch (reg_state) {
1056 	case AMDGPU_REG_STATE_TYPE_USR:
1057 		arr_size = ARRAY_SIZE(usr_reg_addrs);
1058 		reg_addrs = usr_reg_addrs;
1059 		num_smn = NUM_USR_SMN_REGS;
1060 		break;
1061 	case AMDGPU_REG_STATE_TYPE_USR_1:
1062 		arr_size = ARRAY_SIZE(usr1_reg_addrs);
1063 		reg_addrs = usr1_reg_addrs;
1064 		num_smn = NUM_USR1_SMN_REGS;
1065 		break;
1066 	default:
1067 		return -EINVAL;
1068 	}
1069 
1070 	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
1071 
1072 	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
1073 							     sizeof(*usr_regs),
1074 							     num_smn);
1075 	if (max_size < szbuf)
1076 		return -EOVERFLOW;
1077 
1078 	p = &usr_reg_state->usr_state_regs[0];
1079 	for_each_inst(i, adev->aid_mask) {
1080 		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
1081 		usr_regs->inst_header.instance = inst++;
1082 		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
1083 		usr_regs->inst_header.num_smn_regs = num_smn;
1084 		reg_data = usr_regs->smn_reg_values;
1085 
1086 		for (r = 0; r < arr_size; r++) {
1087 			start_addr = reg_addrs[r].start_addr;
1088 			incrx = reg_addrs[r].incrx;
1089 			num_regs = reg_addrs[r].num_regs;
1090 			for (n = 0; n < num_regs; n++) {
1091 				aqua_read_smn_ext(adev, reg_data,
1092 						  start_addr + n * incrx, i);
1093 				reg_data++;
1094 			}
1095 		}
1096 		p = reg_data;
1097 	}
1098 
1099 	usr_reg_state->common_header.structure_size = szbuf;
1100 	usr_reg_state->common_header.format_revision = 1;
1101 	usr_reg_state->common_header.content_revision = 0;
1102 	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
1103 	usr_reg_state->common_header.num_instances = max_usr_instances;
1104 
1105 	return usr_reg_state->common_header.structure_size;
1106 }
1107 
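/*
 * Dispatch a register state query to the PCIe, XGMI, WAFL or USR reader based
 * on the requested state type; returns the number of bytes written to buf or
 * a negative error code.
 */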
1108 ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
1109 				    enum amdgpu_reg_state reg_state, void *buf,
1110 				    size_t max_size)
1111 {
1112 	ssize_t size;
1113 
1114 	switch (reg_state) {
1115 	case AMDGPU_REG_STATE_TYPE_PCIE:
1116 		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
1117 		break;
1118 	case AMDGPU_REG_STATE_TYPE_XGMI:
1119 		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
1120 		break;
1121 	case AMDGPU_REG_STATE_TYPE_WAFL:
1122 		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
1123 		break;
1124 	case AMDGPU_REG_STATE_TYPE_USR:
1125 		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
1126 						    AMDGPU_REG_STATE_TYPE_USR);
1127 		break;
1128 	case AMDGPU_REG_STATE_TYPE_USR_1:
1129 		size = aqua_vanjaram_read_usr_state(
1130 			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
1131 		break;
1132 	default:
1133 		return -EINVAL;
1134 	}
1135 
1136 	return size;
1137 }
1138