xref: /linux/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c (revision d40981350844c2cfa437abfc80596e10ea8f1149)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "soc15.h"
25 
26 #include "soc15_common.h"
27 #include "amdgpu_reg_state.h"
28 #include "amdgpu_xcp.h"
29 #include "gfx_v9_4_3.h"
30 #include "gfxhub_v1_2.h"
31 #include "sdma_v4_4_2.h"
32 
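/* Instance mask owned by partition 'xcp_id' when each partition gets
 * 'num_inst' consecutive instances, e.g. XCP_INST_MASK(2, 1) =
 * GENMASK(1, 0) << 2 = 0xC (instances 2 and 3).
 */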
33 #define XCP_INST_MASK(num_inst, xcp_id)                                        \
34 	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
35 
36 #define AMDGPU_XCP_OPS_KFD	(1 << 0)
37 
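/* Set up the LAYOUT1 doorbell index assignments (KIQ, MEC, user queues,
 * SDMA, IH and VCN) for this SOC.
 */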
38 void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
39 {
40 	int i;
41 
42 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
43 
44 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
45 
46 	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
47 	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
48 	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
49 
50 	adev->doorbell_index.sdma_doorbell_range = 20;
51 	for (i = 0; i < adev->sdma.num_instances; i++)
52 		adev->doorbell_index.sdma_engine[i] =
53 			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
54 			i * (adev->doorbell_index.sdma_doorbell_range >> 1);
55 
56 	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
57 	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
58 
59 	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
60 	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
61 
62 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
63 }
64 
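/* A VCN instance is shared between partitions when there are more
 * partitions than VCN instances.
 */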
65 static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
66 {
67 	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
68 }
69 
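/* Assign a ring to the partition (XCP) whose IP instance mask contains the
 * ring's hardware instance; rings keep AMDGPU_XCP_NO_PARTITION when
 * partitioning is disabled.
 */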
70 static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
71 			     uint32_t inst_idx, struct amdgpu_ring *ring)
72 {
73 	int xcp_id;
74 	enum AMDGPU_XCP_IP_BLOCK ip_blk;
75 	uint32_t inst_mask;
76 
77 	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
78 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
79 		return;
80 
81 	inst_mask = 1 << inst_idx;
82 
83 	switch (ring->funcs->type) {
84 	case AMDGPU_RING_TYPE_GFX:
85 	case AMDGPU_RING_TYPE_COMPUTE:
86 	case AMDGPU_RING_TYPE_KIQ:
87 		ip_blk = AMDGPU_XCP_GFX;
88 		break;
89 	case AMDGPU_RING_TYPE_SDMA:
90 		ip_blk = AMDGPU_XCP_SDMA;
91 		break;
92 	case AMDGPU_RING_TYPE_VCN_ENC:
93 	case AMDGPU_RING_TYPE_VCN_JPEG:
94 		ip_blk = AMDGPU_XCP_VCN;
95 		if (aqua_vanjaram_xcp_vcn_shared(adev))
96 			inst_mask = 1 << (inst_idx * 2);
97 		break;
98 	default:
99 		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
100 		return;
101 	}
102 
103 	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
104 		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
105 			ring->xcp_id = xcp_id;
106 			break;
107 		}
108 	}
109 }
110 
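/* Append the ring's scheduler to the selected partition's scheduler list
 * for the ring's type and hardware priority.
 */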
111 static void aqua_vanjaram_xcp_gpu_sched_update(
112 		struct amdgpu_device *adev,
113 		struct amdgpu_ring *ring,
114 		unsigned int sel_xcp_id)
115 {
116 	unsigned int *num_gpu_sched;
117 
118 	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
119 			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
120 	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
121 			.sched[(*num_gpu_sched)++] = &ring->sched;
122 	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
123 			sel_xcp_id, ring->funcs->type,
124 			ring->hw_prio, *num_gpu_sched);
125 }
126 
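/* Rebuild the per-partition scheduler lists from all rings that are ready
 * to be scheduled.
 */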
127 static int aqua_vanjaram_xcp_sched_list_update(
128 		struct amdgpu_device *adev)
129 {
130 	struct amdgpu_ring *ring;
131 	int i;
132 
133 	for (i = 0; i < MAX_XCP; i++) {
134 		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
135 		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
136 	}
137 
138 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
139 		return 0;
140 
141 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
142 		ring = adev->rings[i];
143 		if (!ring || !ring->sched.ready || ring->no_scheduler)
144 			continue;
145 
146 		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
147 
148 		/* VCN may be shared by two partitions under CPX MODE in certain
149 		 * configs.
150 		 */
151 		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
152 		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
153 		    aqua_vanjaram_xcp_vcn_shared(adev))
154 			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
155 	}
156 
157 	return 0;
158 }
159 
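/* Re-derive each ring's partition assignment and refresh the per-partition
 * scheduler lists.
 */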
160 static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
161 {
162 	int i;
163 
164 	for (i = 0; i < adev->num_rings; i++) {
165 		struct amdgpu_ring *ring = adev->rings[i];
166 
167 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
168 			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
169 			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
170 		else
171 			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
172 	}
173 
174 	return aqua_vanjaram_xcp_sched_list_update(adev);
175 }
176 
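/* Pick a partition for the file descriptor (the least referenced one if it
 * has no assignment yet) and return that partition's scheduler list for the
 * requested IP type and priority.
 */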
177 static int aqua_vanjaram_select_scheds(
178 		struct amdgpu_device *adev,
179 		u32 hw_ip,
180 		u32 hw_prio,
181 		struct amdgpu_fpriv *fpriv,
182 		unsigned int *num_scheds,
183 		struct drm_gpu_scheduler ***scheds)
184 {
185 	u32 sel_xcp_id;
186 	int i;
187 
188 	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
189 		u32 least_ref_cnt = ~0;
190 
191 		fpriv->xcp_id = 0;
192 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
193 			u32 total_ref_cnt;
194 
195 			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
196 			if (total_ref_cnt < least_ref_cnt) {
197 				fpriv->xcp_id = i;
198 				least_ref_cnt = total_ref_cnt;
199 			}
200 		}
201 	}
202 	sel_xcp_id = fpriv->xcp_id;
203 
204 	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
205 		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
206 		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
207 		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
208 		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
209 	} else {
210 		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
211 		return -ENOENT;
212 	}
213 
214 	return 0;
215 }
216 
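/* Translate a logical IP instance to its device (physical) instance. */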
217 static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
218 					 enum amd_hw_ip_block_type block,
219 					 int8_t inst)
220 {
221 	int8_t dev_inst;
222 
223 	switch (block) {
224 	case GC_HWIP:
225 	case SDMA0_HWIP:
226 	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
227 	case VCN_HWIP:
228 		dev_inst = adev->ip_map.dev_inst[block][inst];
229 		break;
230 	default:
231 		/* For rest of the IPs, no look up required.
232 		 * Assume 'logical instance == physical instance' for all configs. */
233 		dev_inst = inst;
234 		break;
235 	}
236 
237 	return dev_inst;
238 }
239 
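/* Translate a mask of logical IP instances to a mask of device instances. */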
240 static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
241 					 enum amd_hw_ip_block_type block,
242 					 uint32_t mask)
243 {
244 	uint32_t dev_mask = 0;
245 	int8_t log_inst, dev_inst;
246 
247 	while (mask) {
248 		log_inst = ffs(mask) - 1;
249 		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
250 		dev_mask |= (1 << dev_inst);
251 		mask &= ~(1 << log_inst);
252 	}
253 
254 	return dev_mask;
255 }
256 
257 static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
258 					  enum amd_hw_ip_block_type ip_block,
259 					  uint32_t inst_mask)
260 {
261 	int l = 0, i;
262 
263 	while (inst_mask) {
264 		i = ffs(inst_mask) - 1;
265 		adev->ip_map.dev_inst[ip_block][l++] = i;
266 		inst_mask &= ~(1 << i);
267 	}
268 	for (; l < HWIP_MAX_INSTANCE; l++)
269 		adev->ip_map.dev_inst[ip_block][l] = -1;
270 }
271 
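/* Build the logical-to-device instance tables for GC, SDMA and VCN and hook
 * up the translation helpers.
 */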
272 void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
273 {
274 	u32 ip_map[][2] = {
275 		{ GC_HWIP, adev->gfx.xcc_mask },
276 		{ SDMA0_HWIP, adev->sdma.sdma_mask },
277 		{ VCN_HWIP, adev->vcn.inst_mask },
278 	};
279 	int i;
280 
281 	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
282 		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
283 
284 	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
285 	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
286 }
287 
288 /* Fixed pattern for SMN addressing on different AIDs:
289  *   bit[34]: indicates cross-AID access
290  *   bit[33:32]: indicates the target AID ID
291  * The AID ID ranges from 0 to 3, as there are at most 4 AIDs.
292  */
293 u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
294 {
295 	u64 ext_offset;
296 
297 	/* local routing and bit[34:32] will be zeros */
298 	if (ext_id == 0)
299 		return 0;
300 
301 	/* Initiated from the host, accesses to any non-zero AID are cross-AID traffic */
302 	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
303 
304 	return ext_offset;
305 }
306 
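/* Derive the current partition mode from the ratio of total XCCs to XCCs
 * per XCP as reported by the GFX IP.
 */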
307 static enum amdgpu_gfx_partition
308 __aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
309 {
310 	struct amdgpu_device *adev = xcp_mgr->adev;
311 	int num_xcc, num_xcc_per_xcp = 0, mode = 0;
312 
313 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
314 	if (adev->gfx.funcs->get_xccs_per_xcp)
315 		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
316 	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
317 		mode = num_xcc / num_xcc_per_xcp;
318 
319 	if (num_xcc_per_xcp == 1)
320 		return AMDGPU_CPX_PARTITION_MODE;
321 
322 	switch (mode) {
323 	case 1:
324 		return AMDGPU_SPX_PARTITION_MODE;
325 	case 2:
326 		return AMDGPU_DPX_PARTITION_MODE;
327 	case 3:
328 		return AMDGPU_TPX_PARTITION_MODE;
329 	case 4:
330 		return AMDGPU_QPX_PARTITION_MODE;
331 	default:
332 		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
333 	}
334 
335 	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
336 }
337 
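/* Query the compute partition mode, preferring the value reported by NBIO
 * and warning if it disagrees with the mode derived from the XCC config.
 */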
338 static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
339 {
340 	enum amdgpu_gfx_partition derv_mode,
341 		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
342 	struct amdgpu_device *adev = xcp_mgr->adev;
343 
344 	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
345 
346 	if (amdgpu_sriov_vf(adev))
347 		return derv_mode;
348 
349 	if (adev->nbio.funcs->get_compute_partition_mode) {
350 		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
351 		if (mode != derv_mode)
352 			dev_warn(
353 				adev->dev,
354 				"Mismatch in compute partition mode - reported : %d derived : %d",
355 				mode, derv_mode);
356 	}
357 
358 	return mode;
359 }
360 
361 static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
362 {
363 	int num_xcc, num_xcc_per_xcp = 0;
364 
365 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
366 
367 	switch (mode) {
368 	case AMDGPU_SPX_PARTITION_MODE:
369 		num_xcc_per_xcp = num_xcc;
370 		break;
371 	case AMDGPU_DPX_PARTITION_MODE:
372 		num_xcc_per_xcp = num_xcc / 2;
373 		break;
374 	case AMDGPU_TPX_PARTITION_MODE:
375 		num_xcc_per_xcp = num_xcc / 3;
376 		break;
377 	case AMDGPU_QPX_PARTITION_MODE:
378 		num_xcc_per_xcp = num_xcc / 4;
379 		break;
380 	case AMDGPU_CPX_PARTITION_MODE:
381 		num_xcc_per_xcp = 1;
382 		break;
383 	}
384 
385 	return num_xcc_per_xcp;
386 }
387 
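/* Fill in the instance mask and IP functions of one IP block for the given
 * partition, based on the current partition mode.
 */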
388 static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
389 				    enum AMDGPU_XCP_IP_BLOCK ip_id,
390 				    struct amdgpu_xcp_ip *ip)
391 {
392 	struct amdgpu_device *adev = xcp_mgr->adev;
393 	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
394 	int num_sdma, num_vcn;
395 
396 	num_sdma = adev->sdma.num_instances;
397 	num_vcn = adev->vcn.num_vcn_inst;
398 
399 	switch (xcp_mgr->mode) {
400 	case AMDGPU_SPX_PARTITION_MODE:
401 		num_sdma_xcp = num_sdma;
402 		num_vcn_xcp = num_vcn;
403 		break;
404 	case AMDGPU_DPX_PARTITION_MODE:
405 		num_sdma_xcp = num_sdma / 2;
406 		num_vcn_xcp = num_vcn / 2;
407 		break;
408 	case AMDGPU_TPX_PARTITION_MODE:
409 		num_sdma_xcp = num_sdma / 3;
410 		num_vcn_xcp = num_vcn / 3;
411 		break;
412 	case AMDGPU_QPX_PARTITION_MODE:
413 		num_sdma_xcp = num_sdma / 4;
414 		num_vcn_xcp = num_vcn / 4;
415 		break;
416 	case AMDGPU_CPX_PARTITION_MODE:
417 		num_sdma_xcp = 2;
418 		num_vcn_xcp = num_vcn ? 1 : 0;
419 		break;
420 	default:
421 		return -EINVAL;
422 	}
423 
424 	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
425 
426 	switch (ip_id) {
427 	case AMDGPU_XCP_GFXHUB:
428 		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
429 		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
430 		break;
431 	case AMDGPU_XCP_GFX:
432 		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
433 		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
434 		break;
435 	case AMDGPU_XCP_SDMA:
436 		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
437 		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
438 		break;
439 	case AMDGPU_XCP_VCN:
440 		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
441 		/* TODO : Assign IP funcs */
442 		break;
443 	default:
444 		return -EINVAL;
445 	}
446 
447 	ip->ip_id = ip_id;
448 
449 	return 0;
450 }
451 
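/* Choose a compute partition mode that matches the number of memory
 * partitions on this device.
 */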
452 static enum amdgpu_gfx_partition
453 __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
454 {
455 	struct amdgpu_device *adev = xcp_mgr->adev;
456 	int num_xcc;
457 
458 	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
459 
460 	if (adev->gmc.num_mem_partitions == 1)
461 		return AMDGPU_SPX_PARTITION_MODE;
462 
463 	if (adev->gmc.num_mem_partitions == num_xcc)
464 		return AMDGPU_CPX_PARTITION_MODE;
465 
466 	if (adev->gmc.num_mem_partitions == num_xcc / 2)
467 		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
468 						    AMDGPU_CPX_PARTITION_MODE;
469 
470 	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
471 		return AMDGPU_DPX_PARTITION_MODE;
472 
473 	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
474 }
475 
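/* Check whether the requested compute partition mode is compatible with the
 * current memory partition and XCC configuration.
 */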
476 static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
477 					  enum amdgpu_gfx_partition mode)
478 {
479 	struct amdgpu_device *adev = xcp_mgr->adev;
480 	int num_xcc, num_xccs_per_xcp;
481 
482 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
483 	switch (mode) {
484 	case AMDGPU_SPX_PARTITION_MODE:
485 		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
486 	case AMDGPU_DPX_PARTITION_MODE:
487 		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
488 	case AMDGPU_TPX_PARTITION_MODE:
489 		return (adev->gmc.num_mem_partitions == 1 ||
490 			adev->gmc.num_mem_partitions == 3) &&
491 		       ((num_xcc % 3) == 0);
492 	case AMDGPU_QPX_PARTITION_MODE:
493 		num_xccs_per_xcp = num_xcc / 4;
494 		return (adev->gmc.num_mem_partitions == 1 ||
495 			adev->gmc.num_mem_partitions == 4) &&
496 		       (num_xccs_per_xcp >= 2);
497 	case AMDGPU_CPX_PARTITION_MODE:
498 		return ((num_xcc > 1) &&
499 		       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
500 		       (num_xcc % adev->gmc.num_mem_partitions) == 0);
501 	default:
502 		return false;
503 	}
504 
505 	return false;
506 }
507 
508 static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
509 {
510 	/* TODO:
511 	 * Stop user queues and threads, and make sure GPU is empty of work.
512 	 */
513 
514 	if (flags & AMDGPU_XCP_OPS_KFD)
515 		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
516 
517 	return 0;
518 }
519 
520 static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
521 {
522 	int ret = 0;
523 
524 	if (flags & AMDGPU_XCP_OPS_KFD) {
525 		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
526 		amdgpu_amdkfd_device_init(xcp_mgr->adev);
527 		/* If KFD init failed, return failure */
528 		if (!xcp_mgr->adev->kfd.init_complete)
529 			ret = -EIO;
530 	}
531 
532 	return ret;
533 }
534 
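/* Switch to the requested (or automatically selected) compute partition
 * mode: tear down KFD if needed, reprogram the GFX partition, then
 * reinitialize the partition info and bring KFD back up.
 */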
535 static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
536 					       int mode, int *num_xcps)
537 {
538 	int num_xcc_per_xcp, num_xcc, ret;
539 	struct amdgpu_device *adev;
540 	u32 flags = 0;
541 
542 	adev = xcp_mgr->adev;
543 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
544 
545 	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
546 		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
547 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
548 			dev_err(adev->dev,
549 				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
550 				adev->gmc.num_mem_partitions);
551 			return -EINVAL;
552 		}
553 	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
554 		dev_err(adev->dev,
555 			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
556 			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
557 		return -EINVAL;
558 	}
559 
560 	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
561 		flags |= AMDGPU_XCP_OPS_KFD;
562 
563 	if (flags & AMDGPU_XCP_OPS_KFD) {
564 		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
565 		if (ret)
566 			goto out;
567 	}
568 
569 	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
570 	if (ret)
571 		goto unlock;
572 
573 	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
574 	if (adev->gfx.funcs->switch_partition_mode)
575 		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
576 						       num_xcc_per_xcp);
577 
578 	/* Init info about new xcps */
579 	*num_xcps = num_xcc / num_xcc_per_xcp;
580 	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
581 
582 	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
583 unlock:
584 	if (flags & AMDGPU_XCP_OPS_KFD)
585 		amdgpu_amdkfd_unlock_kfd(adev);
586 out:
587 	return ret;
588 }
589 
590 static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
591 					  int xcc_id, uint8_t *mem_id)
592 {
593 	/* memory/spatial modes validation check is already done */
594 	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
595 	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
596 
597 	return 0;
598 }
599 
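/* Find the memory partition backing a partition's first XCC, either through
 * the fixed XCC-to-partition mapping or, on app APUs, by NUMA node lookup
 * via ACPI.
 */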
600 static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
601 					struct amdgpu_xcp *xcp, uint8_t *mem_id)
602 {
603 	struct amdgpu_numa_info numa_info;
604 	struct amdgpu_device *adev;
605 	uint32_t xcc_mask;
606 	int r, i, xcc_id;
607 
608 	adev = xcp_mgr->adev;
609 	/* TODO: BIOS is not returning the right info now
610 	 * Check on this later
611 	 */
612 	/*
613 	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
614 		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
615 	*/
616 	if (adev->gmc.num_mem_partitions == 1) {
617 		/* Only one range */
618 		*mem_id = 0;
619 		return 0;
620 	}
621 
622 	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
623 	if (r || !xcc_mask)
624 		return -EINVAL;
625 
626 	xcc_id = ffs(xcc_mask) - 1;
627 	if (!adev->gmc.is_app_apu)
628 		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
629 
630 	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
631 
632 	if (r)
633 		return r;
634 
635 	r = -EINVAL;
636 	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
637 		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
638 			*mem_id = i;
639 			r = 0;
640 			break;
641 		}
642 	}
643 
644 	return r;
645 }
646 
647 static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
648 				     enum AMDGPU_XCP_IP_BLOCK ip_id,
649 				     struct amdgpu_xcp_ip *ip)
650 {
651 	if (!ip)
652 		return -EINVAL;
653 
654 	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
655 }
656 
657 struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
658 	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
659 	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
660 	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
661 	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
662 	.select_scheds = &aqua_vanjaram_select_scheds,
663 	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
664 };
665 
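/* Register the partition manager; mode switching is not exposed under
 * SR-IOV.
 */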
666 static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
667 {
668 	int ret;
669 
670 	if (amdgpu_sriov_vf(adev))
671 		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
672 
673 	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
674 				  &aqua_vanjaram_xcp_funcs);
675 	if (ret)
676 		return ret;
677 
678 	/* TODO: Default memory node affinity init */
679 
680 	return ret;
681 }
682 
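/* Derive the SOC topology (AID mask, SDMA/VCN/JPEG instance counts) from the
 * instance masks, then initialize the partition manager and IP instance map.
 */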
683 int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
684 {
685 	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
686 	int ret, i;
687 
688 	/* generally 1 AID supports 4 instances */
689 	adev->sdma.num_inst_per_aid = 4;
690 	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
691 
692 	adev->aid_mask = i = 1;
693 	inst_mask >>= adev->sdma.num_inst_per_aid;
694 
695 	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
696 	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
697 		avail_inst = inst_mask & mask;
698 		if (avail_inst == mask || avail_inst == 0x3 ||
699 		    avail_inst == 0xc)
700 			adev->aid_mask |= (1 << i);
701 	}
702 
703 	/* Harvest config is not used for aqua vanjaram. VCN and JPEG instances
704 	 * are addressed based on logical instance IDs.
705 	 */
706 	adev->vcn.harvest_config = 0;
707 	adev->vcn.num_inst_per_aid = 1;
708 	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
709 	adev->jpeg.harvest_config = 0;
710 	adev->jpeg.num_inst_per_aid = 1;
711 	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
712 
713 	ret = aqua_vanjaram_xcp_mgr_init(adev);
714 	if (ret)
715 		return ret;
716 
717 	aqua_vanjaram_ip_map_init(adev);
718 
719 	return 0;
720 }
721 
722 static void aqua_read_smn(struct amdgpu_device *adev,
723 			  struct amdgpu_smn_reg_data *regdata,
724 			  uint64_t smn_addr)
725 {
726 	regdata->addr = smn_addr;
727 	regdata->value = RREG32_PCIE(smn_addr);
728 }
729 
730 struct aqua_reg_list {
731 	uint64_t start_addr;
732 	uint32_t num_regs;
733 	uint32_t incrx;
734 };
735 
736 #define DW_ADDR_INCR	4
737 
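/* Read an SMN register on a remote AID by applying the extended SMN
 * address encoding for instance 'i'.
 */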
738 static void aqua_read_smn_ext(struct amdgpu_device *adev,
739 			      struct amdgpu_smn_reg_data *regdata,
740 			      uint64_t smn_addr, int i)
741 {
742 	regdata->addr =
743 		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
744 	regdata->value = RREG32_PCIE_EXT(regdata->addr);
745 }
746 
747 #define smnreg_0x1A340218	0x1A340218
748 #define smnreg_0x1A3402E4	0x1A3402E4
749 #define smnreg_0x1A340294	0x1A340294
750 #define smreg_0x1A380088	0x1A380088
751 
752 #define NUM_PCIE_SMN_REGS	14
753 
754 static struct aqua_reg_list pcie_reg_addrs[] = {
755 	{ smnreg_0x1A340218, 1, 0 },
756 	{ smnreg_0x1A3402E4, 1, 0 },
757 	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
758 	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
759 };
760 
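/* Snapshot PCIe state: a fixed set of SMN registers plus device/link status
 * and AER error status read from the upstream PCIe bridge config space.
 */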
761 static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
762 					     void *buf, size_t max_size)
763 {
764 	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
765 	uint32_t start_addr, incrx, num_regs, szbuf;
766 	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
767 	struct amdgpu_smn_reg_data *reg_data;
768 	struct pci_dev *us_pdev, *ds_pdev;
769 	int aer_cap, r, n;
770 
771 	if (!buf || !max_size)
772 		return -EINVAL;
773 
774 	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;
775 
776 	szbuf = sizeof(*pcie_reg_state) +
777 		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
778 	/* Only one instance of pcie regs */
779 	if (max_size < szbuf)
780 		return -EOVERFLOW;
781 
782 	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
783 						     sizeof(*pcie_reg_state));
784 	pcie_regs->inst_header.instance = 0;
785 	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
786 	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;
787 
788 	reg_data = pcie_regs->smn_reg_values;
789 
790 	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
791 		start_addr = pcie_reg_addrs[r].start_addr;
792 		incrx = pcie_reg_addrs[r].incrx;
793 		num_regs = pcie_reg_addrs[r].num_regs;
794 		for (n = 0; n < num_regs; n++) {
795 			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
796 			++reg_data;
797 		}
798 	}
799 
800 	ds_pdev = pci_upstream_bridge(adev->pdev);
801 	us_pdev = pci_upstream_bridge(ds_pdev);
802 
803 	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
804 				  &pcie_regs->device_status);
805 	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
806 				  &pcie_regs->link_status);
807 
808 	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
809 	if (aer_cap) {
810 		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
811 				      &pcie_regs->pcie_corr_err_status);
812 		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
813 				      &pcie_regs->pcie_uncorr_err_status);
814 	}
815 
816 	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
817 			      &pcie_regs->sub_bus_number_latency);
818 
819 	pcie_reg_state->common_header.structure_size = szbuf;
820 	pcie_reg_state->common_header.format_revision = 1;
821 	pcie_reg_state->common_header.content_revision = 0;
822 	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
823 	pcie_reg_state->common_header.num_instances = 1;
824 
825 	return pcie_reg_state->common_header.structure_size;
826 }
827 
828 #define smnreg_0x11A00050	0x11A00050
829 #define smnreg_0x11A00180	0x11A00180
830 #define smnreg_0x11A00070	0x11A00070
831 #define smnreg_0x11A00200	0x11A00200
832 #define smnreg_0x11A0020C	0x11A0020C
833 #define smnreg_0x11A00210	0x11A00210
834 #define smnreg_0x11A00108	0x11A00108
835 
836 #define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
837 
838 #define NUM_XGMI_SMN_REGS 25
839 
840 static struct aqua_reg_list xgmi_reg_addrs[] = {
841 	{ smnreg_0x11A00050, 1, 0 },
842 	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
843 	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
844 	{ smnreg_0x11A00200, 1, 0 },
845 	{ smnreg_0x11A0020C, 1, 0 },
846 	{ smnreg_0x11A00210, 1, 0 },
847 	{ smnreg_0x11A00108, 1, 0 },
848 };
849 
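/* Snapshot the XGMI link SMN registers for every link on every AID. */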
850 static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
851 					     void *buf, size_t max_size)
852 {
853 	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
854 	uint32_t start_addr, incrx, num_regs, szbuf;
855 	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
856 	struct amdgpu_smn_reg_data *reg_data;
857 	const int max_xgmi_instances = 8;
858 	int inst = 0, i, j, r, n;
859 	const int xgmi_inst = 2;
860 	void *p;
861 
862 	if (!buf || !max_size)
863 		return -EINVAL;
864 
865 	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
866 
867 	szbuf = sizeof(*xgmi_reg_state) +
868 		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
869 				    NUM_XGMI_SMN_REGS);
870 	/* Make sure the buffer can hold all xgmi instances */
871 	if (max_size < szbuf)
872 		return -EOVERFLOW;
873 
874 	p = &xgmi_reg_state->xgmi_state_regs[0];
875 	for_each_inst(i, adev->aid_mask) {
876 		for (j = 0; j < xgmi_inst; ++j) {
877 			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
878 			xgmi_regs->inst_header.instance = inst++;
879 
880 			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
881 			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
882 
883 			reg_data = xgmi_regs->smn_reg_values;
884 
885 			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
886 				start_addr = xgmi_reg_addrs[r].start_addr;
887 				incrx = xgmi_reg_addrs[r].incrx;
888 				num_regs = xgmi_reg_addrs[r].num_regs;
889 
890 				for (n = 0; n < num_regs; n++) {
891 					aqua_read_smn_ext(
892 						adev, reg_data,
893 						XGMI_LINK_REG(start_addr, j) +
894 							n * incrx,
895 						i);
896 					++reg_data;
897 				}
898 			}
899 			p = reg_data;
900 		}
901 	}
902 
903 	xgmi_reg_state->common_header.structure_size = szbuf;
904 	xgmi_reg_state->common_header.format_revision = 1;
905 	xgmi_reg_state->common_header.content_revision = 0;
906 	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
907 	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
908 
909 	return xgmi_reg_state->common_header.structure_size;
910 }
911 
912 #define smnreg_0x11C00070	0x11C00070
913 #define smnreg_0x11C00210	0x11C00210
914 
915 static struct aqua_reg_list wafl_reg_addrs[] = {
916 	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
917 	{ smnreg_0x11C00210, 1, 0 },
918 };
919 
920 #define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
921 
922 #define NUM_WAFL_SMN_REGS 5
923 
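/* Snapshot the WAFL link SMN registers for every link on every AID. */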
924 static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
925 					     void *buf, size_t max_size)
926 {
927 	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
928 	uint32_t start_addr, incrx, num_regs, szbuf;
929 	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
930 	struct amdgpu_smn_reg_data *reg_data;
931 	const int max_wafl_instances = 8;
932 	int inst = 0, i, j, r, n;
933 	const int wafl_inst = 2;
934 	void *p;
935 
936 	if (!buf || !max_size)
937 		return -EINVAL;
938 
939 	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
940 
941 	szbuf = sizeof(*wafl_reg_state) +
942 		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
943 				    NUM_WAFL_SMN_REGS);
944 
945 	if (max_size < szbuf)
946 		return -EOVERFLOW;
947 
948 	p = &wafl_reg_state->wafl_state_regs[0];
949 	for_each_inst(i, adev->aid_mask) {
950 		for (j = 0; j < wafl_inst; ++j) {
951 			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
952 			wafl_regs->inst_header.instance = inst++;
953 
954 			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
955 			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
956 
957 			reg_data = wafl_regs->smn_reg_values;
958 
959 			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
960 				start_addr = wafl_reg_addrs[r].start_addr;
961 				incrx = wafl_reg_addrs[r].incrx;
962 				num_regs = wafl_reg_addrs[r].num_regs;
963 				for (n = 0; n < num_regs; n++) {
964 					aqua_read_smn_ext(
965 						adev, reg_data,
966 						WAFL_LINK_REG(start_addr, j) +
967 							n * incrx,
968 						i);
969 					++reg_data;
970 				}
971 			}
972 			p = reg_data;
973 		}
974 	}
975 
976 	wafl_reg_state->common_header.structure_size = szbuf;
977 	wafl_reg_state->common_header.format_revision = 1;
978 	wafl_reg_state->common_header.content_revision = 0;
979 	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
980 	wafl_reg_state->common_header.num_instances = max_wafl_instances;
981 
982 	return wafl_reg_state->common_header.structure_size;
983 }
984 
985 #define smnreg_0x1B311060 0x1B311060
986 #define smnreg_0x1B411060 0x1B411060
987 #define smnreg_0x1B511060 0x1B511060
988 #define smnreg_0x1B611060 0x1B611060
989 
990 #define smnreg_0x1C307120 0x1C307120
991 #define smnreg_0x1C317120 0x1C317120
992 
993 #define smnreg_0x1C320830 0x1C320830
994 #define smnreg_0x1C380830 0x1C380830
995 #define smnreg_0x1C3D0830 0x1C3D0830
996 #define smnreg_0x1C420830 0x1C420830
997 
998 #define smnreg_0x1C320100 0x1C320100
999 #define smnreg_0x1C380100 0x1C380100
1000 #define smnreg_0x1C3D0100 0x1C3D0100
1001 #define smnreg_0x1C420100 0x1C420100
1002 
1003 #define smnreg_0x1B310500 0x1B310500
1004 #define smnreg_0x1C300400 0x1C300400
1005 
1006 #define USR_CAKE_INCR 0x11000
1007 #define USR_LINK_INCR 0x100000
1008 #define USR_CP_INCR 0x10000
1009 
1010 #define NUM_USR_SMN_REGS	20
1011 
1012 static struct aqua_reg_list usr_reg_addrs[] = {
1013 	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
1014 	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
1015 	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
1016 	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
1017 	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
1018 	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
1019 };
1020 
1021 #define NUM_USR1_SMN_REGS	46
1022 static struct aqua_reg_list usr1_reg_addrs[] = {
1023 	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
1024 	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
1025 	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
1026 	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
1027 	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
1028 	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
1029 	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
1030 	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
1031 	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
1032 	{ smnreg_0x1C300400, 2, USR_CP_INCR },
1033 };
1034 
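/* Snapshot the USR (or USR_1) SMN register set for every AID. */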
1035 static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
1036 					    void *buf, size_t max_size,
1037 					    int reg_state)
1038 {
1039 	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
1040 	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
1041 	struct amdgpu_regs_usr_v1_0 *usr_regs;
1042 	struct amdgpu_smn_reg_data *reg_data;
1043 	const int max_usr_instances = 4;
1044 	struct aqua_reg_list *reg_addrs;
1045 	int inst = 0, i, n, r, arr_size;
1046 	void *p;
1047 
1048 	if (!buf || !max_size)
1049 		return -EINVAL;
1050 
1051 	switch (reg_state) {
1052 	case AMDGPU_REG_STATE_TYPE_USR:
1053 		arr_size = ARRAY_SIZE(usr_reg_addrs);
1054 		reg_addrs = usr_reg_addrs;
1055 		num_smn = NUM_USR_SMN_REGS;
1056 		break;
1057 	case AMDGPU_REG_STATE_TYPE_USR_1:
1058 		arr_size = ARRAY_SIZE(usr1_reg_addrs);
1059 		reg_addrs = usr1_reg_addrs;
1060 		num_smn = NUM_USR1_SMN_REGS;
1061 		break;
1062 	default:
1063 		return -EINVAL;
1064 	}
1065 
1066 	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
1067 
1068 	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
1069 							     sizeof(*usr_regs),
1070 							     num_smn);
1071 	if (max_size < szbuf)
1072 		return -EOVERFLOW;
1073 
1074 	p = &usr_reg_state->usr_state_regs[0];
1075 	for_each_inst(i, adev->aid_mask) {
1076 		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
1077 		usr_regs->inst_header.instance = inst++;
1078 		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
1079 		usr_regs->inst_header.num_smn_regs = num_smn;
1080 		reg_data = usr_regs->smn_reg_values;
1081 
1082 		for (r = 0; r < arr_size; r++) {
1083 			start_addr = reg_addrs[r].start_addr;
1084 			incrx = reg_addrs[r].incrx;
1085 			num_regs = reg_addrs[r].num_regs;
1086 			for (n = 0; n < num_regs; n++) {
1087 				aqua_read_smn_ext(adev, reg_data,
1088 						  start_addr + n * incrx, i);
1089 				reg_data++;
1090 			}
1091 		}
1092 		p = reg_data;
1093 	}
1094 
1095 	usr_reg_state->common_header.structure_size = szbuf;
1096 	usr_reg_state->common_header.format_revision = 1;
1097 	usr_reg_state->common_header.content_revision = 0;
1098 	usr_reg_state->common_header.state_type = reg_state;
1099 	usr_reg_state->common_header.num_instances = max_usr_instances;
1100 
1101 	return usr_reg_state->common_header.structure_size;
1102 }
1103 
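/* Dispatch a register state snapshot request to the per-domain readers. */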
1104 ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
1105 				    enum amdgpu_reg_state reg_state, void *buf,
1106 				    size_t max_size)
1107 {
1108 	ssize_t size;
1109 
1110 	switch (reg_state) {
1111 	case AMDGPU_REG_STATE_TYPE_PCIE:
1112 		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
1113 		break;
1114 	case AMDGPU_REG_STATE_TYPE_XGMI:
1115 		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
1116 		break;
1117 	case AMDGPU_REG_STATE_TYPE_WAFL:
1118 		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
1119 		break;
1120 	case AMDGPU_REG_STATE_TYPE_USR:
1121 		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
1122 						    AMDGPU_REG_STATE_TYPE_USR);
1123 		break;
1124 	case AMDGPU_REG_STATE_TYPE_USR_1:
1125 		size = aqua_vanjaram_read_usr_state(
1126 			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
1127 		break;
1128 	default:
1129 		return -EINVAL;
1130 	}
1131 
1132 	return size;
1133 }
1134