/*
 * Copyright 2016-2024 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_3	"amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5	"amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6	"amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1	"amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0	"amdgpu/vcn_5_0_0.bin"
#define FIRMWARE_VCN5_0_1	"amdgpu/vcn_5_0_1.bin"
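
/*
 * Note (added comment): the MODULE_FIRMWARE() entries below only declare the
 * firmware files this driver may request, so that tools such as initramfs
 * generators can bundle them; the actual request happens per instance in
 * amdgpu_vcn_early_init().
 */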

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
	char ucode_prefix[25];
	int r;

	adev->vcn.inst[i].adev = adev;
	adev->vcn.inst[i].inst = i;
	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (i != 0 && adev->vcn.per_inst_fw) {
		r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_%d.bin", ucode_prefix, i);
		if (r)
			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		if (!adev->vcn.inst[0].fw) {
			r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
						 AMDGPU_UCODE_REQUIRED,
						 "amdgpu/%s.bin", ucode_prefix);
			if (r)
				amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		} else {
			r = 0;
		}
		adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
	}

	return r;
}
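
/*
 * Firmware file resolution, as implemented above: parts with per-instance
 * firmware request "amdgpu/<prefix>_<i>.bin" for instances other than 0
 * (matching entries such as FIRMWARE_VCN4_0_6_1), while instance 0 and parts
 * with a single image request "amdgpu/<prefix>.bin" and share it across all
 * instances.
 */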

int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
	unsigned long bo_size;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int r;

	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
		adev->vcn.inst[i].indirect_sram = true;

	/*
	 * Some Steam Deck's BIOS versions are incompatible with the
	 * indirect SRAM mode, leading to amdgpu being unable to get
	 * properly probed (and even potentially crashing the kernel).
	 * Hence, check for these versions here - notice this is
	 * restricted to Vangogh (Deck's APU).
	 */
	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);

		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
				 !strncmp("F7A0114", bios_ver, 7))) {
			adev->vcn.inst[i].indirect_sram = false;
			dev_info(adev->dev,
				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
		}
	}

	/* from vcn4 and above, only unified queue is used */
	adev->vcn.inst[i].using_unified_queue =
		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);

	hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
	adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
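	/*
	 * Field layout implied by the decode below (new convention):
	 *   [31:28] VEP, [27:24] DEC, [23:20] ENC major,
	 *   [19:12] ENC minor, [11:0] revision.
	 * For example, a hypothetical ucode_version of 0x21301005 would be
	 * reported as ENC: 3.1 DEC: 1 VEP: 2 Revision: 5.
	 */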
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		dev_info(adev->dev,
			 "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}
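
	/*
	 * VCPU BO layout built up below (a summary derived from the sizes
	 * computed in this function): an optional copy of the firmware image
	 * when it is not PSP-loaded, the stack and context regions, the
	 * fw_shared region, and finally the optional firmware log when
	 * amdgpu_vcnfw_log is enabled.
	 */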
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vcn.inst[i].vcpu_bo,
				    &adev->vcn.inst[i].gpu_addr,
				    &adev->vcn.inst[i].cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
			bo_size - fw_shared_size;
	adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
			bo_size - fw_shared_size;

	adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

	if (amdgpu_vcnfw_log) {
		adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.log_offset = log_offset;
	}

	if (adev->vcn.inst[i].indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->vcn.inst[i].dpg_sram_bo,
					    &adev->vcn.inst[i].dpg_sram_gpu_addr,
					    &adev->vcn.inst[i].dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
			return r;
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
	int j;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	amdgpu_bo_free_kernel(
		&adev->vcn.inst[i].dpg_sram_bo,
		&adev->vcn.inst[i].dpg_sram_gpu_addr,
		(void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);

	kvfree(adev->vcn.inst[i].saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
			      &adev->vcn.inst[i].gpu_addr,
			      (void **)&adev->vcn.inst[i].cpu_addr);

	amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);

	if (adev->vcn.per_inst_fw) {
		amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		adev->vcn.inst[i].fw = NULL;
	}
	mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);

	return 0;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
		ret = true;

	return ret;
}

static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.inst[i].saved_bo)
		return -ENOMEM;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
		drm_dev_exit(idx);
	}

	return 0;
}

int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);

	/* err_event_athub will corrupt VCPU buffer, so we need to
	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
	if (in_ras_intr)
		return 0;

	return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}

int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	if (adev->vcn.inst[i].saved_bo != NULL) {
		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			drm_dev_exit(idx);
		}
		kvfree(adev->vcn.inst[i].saved_bo);
		adev->vcn.inst[i].saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned int offset;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(adev->vcn.inst[i].cpu_addr,
					    adev->vcn.inst[i].fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				drm_dev_exit(idx);
			}
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}
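
/*
 * Suspend/resume handling above, in short: amdgpu_vcn_suspend() snapshots the
 * VCPU BO to system memory (skipped on a RAS ATHUB error, where the buffer is
 * already corrupt), and amdgpu_vcn_resume() either restores that snapshot or,
 * when none exists, re-copies the firmware image (non-PSP load) and clears
 * the remainder of the buffer.
 */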

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i = vcn_inst->inst, j;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !adev->vcn.inst[i].using_unified_queue) {
		struct dpg_pause_state new_state;

		if (fence[i] ||
		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
	}

	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
	fences += fence[i];

	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
	}
}

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
	int r = 0;

	atomic_inc(&vcn_inst->total_submission_cnt);

	if (!cancel_delayed_work_sync(&vcn_inst->idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&vcn_inst->vcn_pg_lock);
	vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !vcn_inst->using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < vcn_inst->num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);

			if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
	}
	mutex_unlock(&vcn_inst->vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
	    !adev->vcn.inst[ring->me].using_unified_queue)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
			      VCN_IDLE_TIMEOUT);
}
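
/*
 * Power management around submissions, as implemented above: begin_use bumps
 * the submission counters, re-enables the VIDEO power profile if the idle
 * worker had dropped it, ungates the instance and (for VCN3 and earlier,
 * non-unified queues) updates the DPG pause state; end_use drops the counters
 * and re-arms the idle worker, which gates the instance and the power profile
 * again once no fences or pending submissions remain after VCN_IDLE_TIMEOUT.
 */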

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}
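
/*
 * amdgpu_vcn_dec_send_msg() above hands a message buffer to the decoder by
 * writing its GPU address through the internal data0/data1 registers and
 * kicking processing via the cmd register, padding the rest of the 16-dword
 * IB with register NOPs before a direct submission; within this file it is
 * only used by the decoder IB test further below.
 */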

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						   uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}
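
/*
 * Unified-queue IBs are prefixed by the two 4-dword packets built in
 * amdgpu_vcn_unified_ring_ib_header(): a checksum packet (0x30000002) whose
 * third dword is patched afterwards by amdgpu_vcn_unified_ring_ib_checksum()
 * with the sum of ib_pack_in_dw dwords starting two slots after it, and an
 * engine-info packet (0x30000001) selecting the encode (0x2) or decode (0x3)
 * path. This is why the message senders reserve 8 extra dwords whenever
 * using_unified_queue is set.
 */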

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (adev->vcn.inst[ring->me].using_unified_queue) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(&ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r;

	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
		if (r)
			goto error;
	}

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		if (adev->vcn.harvest_config & (1 << i))
			return;

		if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
		    && (i > 0))
			return;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		/* currently only support 2 FW instances */
		if (i >= 2) {
			dev_info(adev->dev, "More than 2 VCN FW instances!\n");
			return;
		}
		idx = AMDGPU_UCODE_ID_VCN + i;
		adev->firmware.ucode[idx].ucode_id = idx;
		adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
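/*
 * The log region lives at the tail of the VCPU BO, right after fw_shared
 * (see amdgpu_vcn_sw_init() and amdgpu_vcn_fwlog_init() below): a struct
 * amdgpu_vcn_fwlog header (header_size/buffer_size/rptr/wptr/wrapped)
 * followed by the log data, which wraps back to header_size once it reaches
 * AMDGPU_VCNFW_LOG_SIZE. The read handler below copies the [rptr, wptr)
 * window out to userspace, handling that wrap; the file only carries data
 * when the amdgpu_vcnfw_log option is enabled.
 */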
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min_t(size_t, size, available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							      + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ih_data.head = *ras_if;
		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
		else
			dev_warn(adev->dev,
				 "No ras_poison_handler interface in SRIOV for VCN!\n");
	}

	return 0;
}
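
/*
 * Poison interrupts are routed differently depending on the environment, as
 * handled above: on bare metal the event is dispatched to the RAS core via
 * amdgpu_ras_interrupt_dispatch(), while under SR-IOV it is forwarded to the
 * host through the virt ras_poison_handler callback when one is available.
 */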

int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i) ||
			    !adev->vcn.inst[i].ras_poison_irq.funcs)
				continue;

			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
			if (r)
				goto late_fini;
		}
	}
	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_vcn_ras *ras;

	if (!adev->vcn.ras)
		return 0;

	ras = adev->vcn.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register vcn ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "vcn");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;

	return 0;
}

int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = (ucode_id ? ucode_id :
			     (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
					 AMDGPU_UCODE_ID_VCN0_RAM)),
		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
			       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
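
/*
 * In indirect-SRAM (DPG) mode the register programming sequence is staged in
 * the dpg_sram buffer allocated in amdgpu_vcn_sw_init(); the helper above
 * asks the PSP to apply it as the VCN0_RAM/VCN1_RAM "ucode", sizing the
 * upload by how far dpg_sram_curr_addr has advanced past the buffer start.
 */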

static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
}

static DEVICE_ATTR(vcn_reset_mask, 0444,
		   amdgpu_get_vcn_reset_mask, NULL);

int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->vcn.num_vcn_inst) {
		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->dev->kobj.sd) {
		if (adev->vcn.num_vcn_inst)
			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
	}
}

/*
 * debugfs to enable/disable vcn job submission to specific core or
 * instance. It is created only if the queue type is unified.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
	if ((val & mask) == 0)
		return -EINVAL;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (val & (1ULL << i))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;
	}
	/* publish sched.ready flag update effective immediately across smp */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (ring->sched.ready)
			mask |= 1ULL << i;
	}
	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
			 amdgpu_debugfs_vcn_sched_mask_get,
			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
#endif

void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
		return;
	sprintf(name, "amdgpu_vcn_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}
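
/*
 * Usage example for the file created above (the path depends on the DRM
 * minor number): writing a bitmask such as
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_vcn_sched_mask
 * keeps only instance 0 accepting new jobs, and reading the file returns the
 * mask of currently enabled instances.
 */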

/**
 * vcn_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
			      enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		ret |= vinst->set_pg_state(vinst, state);
	}

	return ret;
}