/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
#include "amdgpu_ip.h"

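/* Build the instance mask owned by partition xcp_id, assuming each
 * partition owns a contiguous block of num_inst instances.
 * E.g. XCP_INST_MASK(2, 1) == GENMASK(1, 0) << 2 == 0xc.
 */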
#define XCP_INST_MASK(num_inst, xcp_id)                                        \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

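/* Initialize the layout-1 doorbell index assignments (KIQ, MEC ring,
 * user queues, SDMA, IH and VCN) for aqua vanjaram style SOCs.
 */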
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates cross-AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3 as the maximum AID number is 4,
 * e.g. ext_id 2 encodes to 0x600000000.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* Local routing; bits [34:32] will be zero */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

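/* Derive the current partition mode from the ratio of total XCCs to
 * XCCs per partition; falls back to UNKNOWN if the ratio is not exact.
 */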
static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

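/* Query the active compute partition mode. For VFs the derived mode is
 * used directly; otherwise the NBIO-reported mode, when available, is
 * returned and any mismatch with the derived mode is flagged.
 */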
static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode) {
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported : %d derived : %d",
				mode, derv_mode);
			if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
				amdgpu_device_bus_status_check(adev);
		}
	}

	return mode;
}

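/* Return the number of XCCs assigned to each partition for a given
 * compute partition mode (0 if the mode is not handled).
 */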
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

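/* Fill in the instance mask and IP functions of the given IP block for
 * partition xcp_id. SDMA and VCN instances are distributed evenly
 * across partitions; a VCN instance may be shared by multiple
 * partitions when there are more partitions than VCN instances.
 */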
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				    enum AMDGPU_XCP_IP_BLOCK ip_id,
				    struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

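/* Report the number of partitions and the supported NPS (memory
 * partition) modes for a given compute partition mode.
 */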
static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (gc_ver == IP_VERSION(9, 5, 0))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

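/* Populate xcp_cfg with per-partition resource counts (XCC, SDMA, VCN
 * decode, JPEG) and the compatible NPS modes for the requested
 * partition mode. Resources scarcer than the partition count are
 * shared between partitions.
 */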
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

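/* Pick a default compute partition mode based on the number of memory
 * partitions (and whether the device is an APU).
 */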
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

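/* Check whether the requested partition mode is achievable with the
 * current XCC count and compatible with the active NPS mode.
 */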
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					       &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}

	return false;
}

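/* Recompute the set of currently valid partition modes out of all
 * supported modes.
 */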
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

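/* Switch to the requested compute partition mode (resolving AUTO to a
 * suitable mode first). KFD is locked around the switch when it is
 * initialized and the device is not in reset or suspend.
 */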
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
		!adev->in_suspend)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

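/* Map an XCC id to its memory partition id based on how many XCCs and
 * partitions share each memory partition.
 */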
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

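/* Resolve the memory partition id backing the given partition: trivial
 * for a single range, computed from the XCC id for dGPUs, and matched
 * via NUMA node info for app APUs.
 */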
static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     enum AMDGPU_XCP_IP_BLOCK ip_id,
				     struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
};

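/* Create the partition manager. VFs cannot switch modes, so the switch
 * callback is dropped before registration.
 */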
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

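/* Derive SDMA/VCN/JPEG instance topology and the AID mask from the
 * instance masks, then bring up the partition manager and the logical
 * to physical IP instance mapping.
 */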
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	amdgpu_ip_map_init(adev);

	return 0;
}

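/* Read one SMN register on the local AID into regdata. */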
static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR	4

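/* Read one SMN register on (possibly remote) AID i, applying the
 * extended SMN address encoding.
 */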
static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218	0x1A340218
#define smnreg_0x1A3402E4	0x1A3402E4
#define smnreg_0x1A340294	0x1A340294
#define smreg_0x1A380088	0x1A380088

#define NUM_PCIE_SMN_REGS	14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

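/* Snapshot PCIe state: the SMN registers listed in pcie_reg_addrs plus
 * device/link status, AER status and bus numbers read from the bridge
 * two levels above the GPU, serialized into buf as
 * amdgpu_reg_state_pcie_v1_0.
 */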
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050	0x11A00050
#define smnreg_0x11A00180	0x11A00180
#define smnreg_0x11A00070	0x11A00070
#define smnreg_0x11A00200	0x11A00200
#define smnreg_0x11A0020C	0x11A0020C
#define smnreg_0x11A00210	0x11A00210
#define smnreg_0x11A00108	0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

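/* Snapshot the XGMI SMN register set for both link instances of every
 * AID into buf as amdgpu_reg_state_xgmi_v1_0.
 */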
static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Buffer must hold all xgmi instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070	0x11C00070
#define smnreg_0x11C00210	0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | ((l) << 20))

#define NUM_WAFL_SMN_REGS 5

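/* Snapshot the WAFL SMN register set for both link instances of every
 * AID into buf as amdgpu_reg_state_wafl_v1_0.
 */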
static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS	20

static struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS	46
static struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

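/* Snapshot the USR or USR_1 SMN register set (selected by reg_state)
 * for every AID into buf as amdgpu_reg_state_usr_v1_0.
 */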
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

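/* Dispatch a register state snapshot request to the reader for the
 * requested state type. Returns the number of bytes written to buf or
 * a negative error code.
 */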
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}