/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

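/*
 * Append the ring's DRM scheduler to the selected XCP's scheduler list
 * for the ring's hw IP type and priority.
 */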
static void aqua_vanjaram_xcp_gpu_sched_update(
		struct amdgpu_device *adev,
		struct amdgpu_ring *ring,
		unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

static int aqua_vanjaram_xcp_sched_list_update(
		struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

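/*
 * Assign an XCP to every ring and rebuild the per-partition scheduler
 * lists; compute and KIQ rings are keyed by their XCC id, all other
 * rings by their 'me' instance.
 */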
static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

static int aqua_vanjaram_select_scheds(
		struct amdgpu_device *adev,
		u32 hw_ip,
		u32 hw_prio,
		struct amdgpu_fpriv *fpriv,
		unsigned int *num_scheds,
		struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs.
		 */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

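/* Translate a mask of logical instances into a mask of device instances. */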
static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for smn addressing on different AIDs:
 *   bit[34]: indicates cross AID access
 *   bit[33:32]: indicates target AID id
 * AID id range is 0 ~ 3 as maximum AID number is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing and bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xcc_per_xcp = 0, mode = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
	if (adev->gfx.funcs->get_xccs_per_xcp)
		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
		mode = num_xcc / num_xcc_per_xcp;

	if (num_xcc_per_xcp == 1)
		return AMDGPU_CPX_PARTITION_MODE;

	switch (mode) {
	case 1:
		return AMDGPU_SPX_PARTITION_MODE;
	case 2:
		return AMDGPU_DPX_PARTITION_MODE;
	case 3:
		return AMDGPU_TPX_PARTITION_MODE;
	case 4:
		return AMDGPU_QPX_PARTITION_MODE;
	default:
		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	}

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition derv_mode,
		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);

	if (amdgpu_sriov_vf(adev))
		return derv_mode;

	if (adev->nbio.funcs->get_compute_partition_mode) {
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
		if (mode != derv_mode) {
			dev_warn(
				adev->dev,
				"Mismatch in compute partition mode - reported : %d derived : %d",
				mode, derv_mode);
			if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
				amdgpu_device_bus_status_check(adev);
		}
	}

	return mode;
}

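/* Number of XCCs grouped into each XCP for the given partition mode */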
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_sdma, num_vcn, num_shared_vcn, num_xcp;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;
	num_shared_vcn = 1;

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
	num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
	case AMDGPU_DPX_PARTITION_MODE:
	case AMDGPU_TPX_PARTITION_MODE:
	case AMDGPU_QPX_PARTITION_MODE:
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
		num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
		break;
	default:
		return -EINVAL;
	}

	if (num_vcn && num_xcp > num_vcn)
		num_shared_vcn = num_xcp / num_vcn;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask =
			XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
		/* TODO: Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
					    int px_mode, int *num_xcp,
					    uint16_t *nps_modes)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
		return -EINVAL;

	switch (px_mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		*num_xcp = 1;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		*num_xcp = 2;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		*num_xcp = 3;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		*num_xcp = 4;
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
		*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
			     BIT(AMDGPU_NPS4_PARTITION_MODE);
		if (amdgpu_sriov_vf(adev))
			*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

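/*
 * Report how XCC, SDMA, VCN and JPEG instances are split across XCPs in
 * the requested partition mode, along with the compatible NPS modes.
 */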
static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
					  int mode,
					  struct amdgpu_xcp_cfg *xcp_cfg)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int max_res[AMDGPU_XCP_RES_MAX] = {};
	bool res_lt_xcp;
	int num_xcp, i, r;
	u16 nps_modes;

	if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
		return -EINVAL;

	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;

	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
	if (r)
		return r;

	xcp_cfg->compatible_nps_modes =
		(adev->gmc.supported_nps_modes & nps_modes);
	xcp_cfg->num_res = ARRAY_SIZE(max_res);

	for (i = 0; i < xcp_cfg->num_res; i++) {
		res_lt_xcp = max_res[i] < num_xcp;
		xcp_cfg->xcp_res[i].id = i;
		xcp_cfg->xcp_res[i].num_inst =
			res_lt_xcp ? 1 : max_res[i] / num_xcp;
		xcp_cfg->xcp_res[i].num_inst =
			i == AMDGPU_XCP_RES_JPEG ?
			xcp_cfg->xcp_res[i].num_inst *
			adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
		xcp_cfg->xcp_res[i].num_shared =
			res_lt_xcp ? num_xcp / max_res[i] : 1;
	}

	return 0;
}

static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp, r;
	int num_xcp, nps_mode;
	u16 supp_nps_modes;
	bool comp_mode;

	nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
					     &supp_nps_modes);
	if (r)
		return false;

	comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return comp_mode && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return comp_mode && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return comp_mode && ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return comp_mode && (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return comp_mode && (num_xcc > 1);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

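/* Re-probe and re-initialize KFD after the partition mode has changed */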
static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

static void
__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	xcp_mgr->avail_xcp_modes = 0;

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
			xcp_mgr->avail_xcp_modes |= BIT(mode);
	}
}

static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
	if (!ret)
		__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

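/* Map an XCC to the memory partition that backs it */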
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list =
		&aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	__aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
	/* TODO: Default memory node affinity init */

	return ret;
}

int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 ||
		    avail_inst == 0xc)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}

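/* Read a single SMN register and record its address/value pair */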
static void aqua_read_smn(struct amdgpu_device *adev,
			  struct amdgpu_smn_reg_data *regdata,
			  uint64_t smn_addr)
{
	regdata->addr = smn_addr;
	regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
	uint64_t start_addr;
	uint32_t num_regs;
	uint32_t incrx;
};

#define DW_ADDR_INCR 4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
			      struct amdgpu_smn_reg_data *regdata,
			      uint64_t smn_addr, int i)
{
	regdata->addr =
		smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
	regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218 0x1A340218
#define smnreg_0x1A3402E4 0x1A3402E4
#define smnreg_0x1A340294 0x1A340294
#define smreg_0x1A380088 0x1A380088

#define NUM_PCIE_SMN_REGS 14

static struct aqua_reg_list pcie_reg_addrs[] = {
	{ smnreg_0x1A340218, 1, 0 },
	{ smnreg_0x1A3402E4, 1, 0 },
	{ smnreg_0x1A340294, 6, DW_ADDR_INCR },
	{ smreg_0x1A380088, 6, DW_ADDR_INCR },
};

static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_pcie_v1_0 *pcie_regs;
	struct amdgpu_smn_reg_data *reg_data;
	struct pci_dev *us_pdev, *ds_pdev;
	int aer_cap, r, n;

	if (!buf || !max_size)
		return -EINVAL;

	pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

	szbuf = sizeof(*pcie_reg_state) +
		amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
	/* Only one instance of pcie regs */
	if (max_size < szbuf)
		return -EOVERFLOW;

	pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
						     sizeof(*pcie_reg_state));
	pcie_regs->inst_header.instance = 0;
	pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
	pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

	reg_data = pcie_regs->smn_reg_values;

	for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
		start_addr = pcie_reg_addrs[r].start_addr;
		incrx = pcie_reg_addrs[r].incrx;
		num_regs = pcie_reg_addrs[r].num_regs;
		for (n = 0; n < num_regs; n++) {
			aqua_read_smn(adev, reg_data, start_addr + n * incrx);
			++reg_data;
		}
	}

	ds_pdev = pci_upstream_bridge(adev->pdev);
	us_pdev = pci_upstream_bridge(ds_pdev);

	pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
				  &pcie_regs->device_status);
	pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
				  &pcie_regs->link_status);

	aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
	if (aer_cap) {
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
				      &pcie_regs->pcie_corr_err_status);
		pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
				      &pcie_regs->pcie_uncorr_err_status);
	}

	pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
			      &pcie_regs->sub_bus_number_latency);

	pcie_reg_state->common_header.structure_size = szbuf;
	pcie_reg_state->common_header.format_revision = 1;
	pcie_reg_state->common_header.content_revision = 0;
	pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
	pcie_reg_state->common_header.num_instances = 1;

	return pcie_reg_state->common_header.structure_size;
}

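/* XGMI link registers, dumped per AID and per link instance below */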
#define smnreg_0x11A00050 0x11A00050
#define smnreg_0x11A00180 0x11A00180
#define smnreg_0x11A00070 0x11A00070
#define smnreg_0x11A00200 0x11A00200
#define smnreg_0x11A0020C 0x11A0020C
#define smnreg_0x11A00210 0x11A00210
#define smnreg_0x11A00108 0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
	{ smnreg_0x11A00050, 1, 0 },
	{ smnreg_0x11A00180, 16, DW_ADDR_INCR },
	{ smnreg_0x11A00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11A00200, 1, 0 },
	{ smnreg_0x11A0020C, 1, 0 },
	{ smnreg_0x11A00210, 1, 0 },
	{ smnreg_0x11A00108, 1, 0 },
};

static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_xgmi_instances = 8;
	int inst = 0, i, j, r, n;
	const int xgmi_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

	szbuf = sizeof(*xgmi_reg_state) +
		amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
				    NUM_XGMI_SMN_REGS);
	/* Make sure the buffer can hold all XGMI instances */
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &xgmi_reg_state->xgmi_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < xgmi_inst; ++j) {
			xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
			xgmi_regs->inst_header.instance = inst++;

			xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
			xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

			reg_data = xgmi_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
				start_addr = xgmi_reg_addrs[r].start_addr;
				incrx = xgmi_reg_addrs[r].incrx;
				num_regs = xgmi_reg_addrs[r].num_regs;

				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						XGMI_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	xgmi_reg_state->common_header.structure_size = szbuf;
	xgmi_reg_state->common_header.format_revision = 1;
	xgmi_reg_state->common_header.content_revision = 0;
	xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
	xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

	return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070 0x11C00070
#define smnreg_0x11C00210 0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
	{ smnreg_0x11C00070, 4, DW_ADDR_INCR },
	{ smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

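/* Dump WAFL link state with the same per-AID, per-link walk used for XGMI above */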
static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
					     void *buf, size_t max_size)
{
	struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
	uint32_t start_addr, incrx, num_regs, szbuf;
	struct amdgpu_regs_wafl_v1_0 *wafl_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_wafl_instances = 8;
	int inst = 0, i, j, r, n;
	const int wafl_inst = 2;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

	szbuf = sizeof(*wafl_reg_state) +
		amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
				    NUM_WAFL_SMN_REGS);

	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &wafl_reg_state->wafl_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		for (j = 0; j < wafl_inst; ++j) {
			wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
			wafl_regs->inst_header.instance = inst++;

			wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
			wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

			reg_data = wafl_regs->smn_reg_values;

			for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
				start_addr = wafl_reg_addrs[r].start_addr;
				incrx = wafl_reg_addrs[r].incrx;
				num_regs = wafl_reg_addrs[r].num_regs;
				for (n = 0; n < num_regs; n++) {
					aqua_read_smn_ext(
						adev, reg_data,
						WAFL_LINK_REG(start_addr, j) +
							n * incrx,
						i);
					++reg_data;
				}
			}
			p = reg_data;
		}
	}

	wafl_reg_state->common_header.structure_size = szbuf;
	wafl_reg_state->common_header.format_revision = 1;
	wafl_reg_state->common_header.content_revision = 0;
	wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
	wafl_reg_state->common_header.num_instances = max_wafl_instances;

	return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS 20

struct aqua_reg_list usr_reg_addrs[] = {
	{ smnreg_0x1B311060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B411060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B511060, 4, DW_ADDR_INCR },
	{ smnreg_0x1B611060, 4, DW_ADDR_INCR },
	{ smnreg_0x1C307120, 2, DW_ADDR_INCR },
	{ smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS 46
struct aqua_reg_list usr1_reg_addrs[] = {
	{ smnreg_0x1C320830, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420830, 4, USR_CAKE_INCR },
	{ smnreg_0x1C320100, 6, USR_CAKE_INCR },
	{ smnreg_0x1C380100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
	{ smnreg_0x1C420100, 4, USR_CAKE_INCR },
	{ smnreg_0x1B310500, 4, USR_LINK_INCR },
	{ smnreg_0x1C300400, 2, USR_CP_INCR },
};

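/*
 * Dump USR or USR_1 register state, one register set per AID, depending
 * on the reg_state type requested.
 */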
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
					    void *buf, size_t max_size,
					    int reg_state)
{
	uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
	struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
	struct amdgpu_regs_usr_v1_0 *usr_regs;
	struct amdgpu_smn_reg_data *reg_data;
	const int max_usr_instances = 4;
	struct aqua_reg_list *reg_addrs;
	int inst = 0, i, n, r, arr_size;
	void *p;

	if (!buf || !max_size)
		return -EINVAL;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_USR:
		arr_size = ARRAY_SIZE(usr_reg_addrs);
		reg_addrs = usr_reg_addrs;
		num_smn = NUM_USR_SMN_REGS;
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		arr_size = ARRAY_SIZE(usr1_reg_addrs);
		reg_addrs = usr1_reg_addrs;
		num_smn = NUM_USR1_SMN_REGS;
		break;
	default:
		return -EINVAL;
	}

	usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

	szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
							     sizeof(*usr_regs),
							     num_smn);
	if (max_size < szbuf)
		return -EOVERFLOW;

	p = &usr_reg_state->usr_state_regs[0];
	for_each_inst(i, adev->aid_mask) {
		usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
		usr_regs->inst_header.instance = inst++;
		usr_regs->inst_header.state = AMDGPU_INST_S_OK;
		usr_regs->inst_header.num_smn_regs = num_smn;
		reg_data = usr_regs->smn_reg_values;

		for (r = 0; r < arr_size; r++) {
			start_addr = reg_addrs[r].start_addr;
			incrx = reg_addrs[r].incrx;
			num_regs = reg_addrs[r].num_regs;
			for (n = 0; n < num_regs; n++) {
				aqua_read_smn_ext(adev, reg_data,
						  start_addr + n * incrx, i);
				reg_data++;
			}
		}
		p = reg_data;
	}

	usr_reg_state->common_header.structure_size = szbuf;
	usr_reg_state->common_header.format_revision = 1;
	usr_reg_state->common_header.content_revision = 0;
	usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
	usr_reg_state->common_header.num_instances = max_usr_instances;

	return usr_reg_state->common_header.structure_size;
}

ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
				    enum amdgpu_reg_state reg_state, void *buf,
				    size_t max_size)
{
	ssize_t size;

	switch (reg_state) {
	case AMDGPU_REG_STATE_TYPE_PCIE:
		size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_XGMI:
		size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_WAFL:
		size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
		break;
	case AMDGPU_REG_STATE_TYPE_USR:
		size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
						    AMDGPU_REG_STATE_TYPE_USR);
		break;
	case AMDGPU_REG_STATE_TYPE_USR_1:
		size = aqua_vanjaram_read_usr_state(
			adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
		break;
	default:
		return -EINVAL;
	}

	return size;
}