/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

#define XCP_INST_MASK(num_inst, xcp_id)                                        \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
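/*
 * Worked example (illustrative): for num_inst = 2 and xcp_id = 1, the macro
 * expands to GENMASK(1, 0) << 2 = 0b1100, i.e. instances 2 and 3 belong to
 * partition 1. For num_inst = 0 the mask is 0, avoiding GENMASK(-1, 0).
 */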

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
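/*
 * Illustrative layout note: with sdma_doorbell_range = 20, each SDMA engine i
 * gets a block of range >> 1 = 10 doorbells starting at
 * AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + i * 10.
 */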

static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}
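/*
 * Example (hypothetical config): with 8 XCPs in CPX mode but only 4 VCN
 * instances, num_xcps > num_vcn_inst, so each VCN instance is shared by
 * two partitions.
 */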

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void aqua_vanjaram_xcp_gpu_sched_update(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring,
					       unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX mode in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

static int aqua_vanjaram_select_scheds(struct amdgpu_device *adev,
				       u32 hw_ip, u32 hw_prio,
				       struct amdgpu_fpriv *fpriv,
				       unsigned int *num_scheds,
				       struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs.
		 */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}
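/*
 * Illustrative mapping (hypothetical harvest config): if only SDMA device
 * instances 0 and 2 are present, dev_inst[SDMA0_HWIP] = {0, 2, -1, ...},
 * so logical instance 1 resolves to device instance 2.
 */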

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}
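/*
 * Worked example using the mapping above: with dev_inst = {0, 2, ...}, a
 * logical mask of 0b11 (logical instances 0 and 1) translates to a device
 * mask of 0b101 (device instances 0 and 2).
 */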

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}
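/*
 * Example: inst_mask = 0b1101 produces dev_inst = {0, 2, 3} for logical
 * instances 0..2; the remaining entries up to HWIP_MAX_INSTANCE are set
 * to -1 (invalid).
 */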

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates a cross-AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing; bits [34:32] will be zero */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}
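/*
 * Worked example: ext_id = 2 yields ((u64)2 << 32) | (1ULL << 34) =
 * 0x600000000; bit 34 marks the access as cross-AID and bits 33:32
 * select AID 2.
 */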

static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}
}
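/*
 * Example: with 8 XCCs and 2 XCCs per XCP, mode = 8 / 2 = 4, which maps to
 * AMDGPU_QPX_PARTITION_MODE; 1 XCC per XCP always means CPX.
 */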

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode)
			dev_warn(adev->dev,
				 "Mismatch in compute partition mode - reported : %d derived : %d",
				 mode, derv_mode);
	}

	return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO: Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}
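/*
 * Illustrative numbers (hypothetical 8-XCC, 8-SDMA, 4-VCN config in QPX):
 * num_xcp = 4, num_sdma_xcp = 2, num_vcn_xcp = 1 and num_shared_vcn = 1,
 * so XCP 1 gets SDMA inst_mask = XCP_INST_MASK(2, 1) = 0b1100 and VCN
 * inst_mask = XCP_INST_MASK(1, 1) = 0b0010.
 */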

static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (amdgpu_sriov_vf(adev))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		if (i == AMDGPU_XCP_RES_JPEG)
			xcp_cfg->xcp_res[i].num_inst *=
				adev->jpeg.num_jpeg_rings;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}
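/*
 * Sharing example: with 4 VCN instances and CPX giving 8 XCPs, res_lt_xcp
 * is true for the DEC resource, so each partition sees num_inst = 1 and
 * num_shared = 8 / 4 = 2 (two partitions per instance).
 */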

static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					     &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	/* two XCCs seem to exist only in the emulation phase */
	case 2:
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	default:
		break;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}
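/*
 * Worked example (hypothetical DPX on 8 XCCs with 2 memory partitions):
 * num_xcc_per_xcp = 4 and num_xcp_per_mem_partition = 1, so xcc_id 5 maps
 * to XCP 5 / 4 = 1 and mem_id 1 / 1 = 1.
 */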

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list =
		&aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	__aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally one AID supports 4 SDMA instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}
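	/*
	 * Example: an SDMA mask of 0xfff (12 instances, 4 per AID) sets
	 * aid_mask = 0b111. The 0x3/0xc checks also accept AIDs with only
	 * half of their SDMA instances present (illustrative reading).
	 */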

	/* Harvest config is not used for aqua vanjaram. VCN and JPEG will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smnreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smnreg_0x1A380088, 6, DW_ADDR_INCR },
};
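/* Sanity: 1 + 1 + 6 + 6 register reads above match NUM_PCIE_SMN_REGS (14). */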

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))
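/*
 * Example: XGMI_LINK_REG(smnreg_0x11A00050, 1) = 0x11B00050, i.e. link 1
 * registers live at a 0x100000 offset from link 0.
 */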

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};
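/* Sanity: 1 + 16 + 4 + 1 + 1 + 1 + 1 reads above match NUM_XGMI_SMN_REGS (25). */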

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Check that the buffer can hold all xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};
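/* Sanity: 4 + 1 reads above match NUM_WAFL_SMN_REGS (5). */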

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};
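/* Sanity: 4 * 4 + 2 * 2 reads above match NUM_USR_SMN_REGS (20). */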

#define NUM_USR1_SMN_REGS	46
static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};
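/* Sanity: (6 + 5 + 5 + 4) * 2 + 4 + 2 reads above match NUM_USR1_SMN_REGS (46). */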

static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}
1278