Lines matching refs: uvd
155 if (adev->uvd.address_64_bit) in amdgpu_uvd_create_msg_bo_helper()
192 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
263 r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name); in amdgpu_uvd_sw_init()
267 amdgpu_ucode_release(&adev->uvd.fw); in amdgpu_uvd_sw_init()
272 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
274 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
293 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
295 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
300 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
312 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
314 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_uvd_sw_init()
318 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
322 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in amdgpu_uvd_sw_init()
323 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_init()
328 &adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_init()
329 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_init()
330 &adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_init()
337 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
338 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
339 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
344 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
346 r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); in amdgpu_uvd_sw_init()
352 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
355 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
358 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
361 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
364 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
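
The amdgpu_uvd_sw_init() fragments above show the recurring per-instance pattern: every loop over adev->uvd.num_uvd_inst first tests adev->uvd.harvest_config to skip harvested instances before touching per-instance state, and the shared handle table is zeroed before the IP block is used. A minimal userspace model of that pattern follows; the struct layout, the calloc()-based allocator and every name in it are illustrative stand-ins, not the kernel API.

/*
 * Model of the init pattern visible above: walk every UVD instance,
 * skip the ones masked out by harvest_config, allocate a backing
 * buffer per surviving instance, then clear the shared handle table.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_UVD_INSTANCES 2
#define MAX_UVD_HANDLES   16   /* stands in for AMDGPU_MAX_UVD_HANDLES */

struct uvd_inst_model {
	void *vcpu_buf;          /* stands in for inst[j].vcpu_bo / cpu_addr */
	size_t vcpu_size;
};

struct uvd_model {
	unsigned int num_inst;
	uint32_t harvest_config; /* bit j set => instance j is harvested */
	unsigned int max_handles;
	uint32_t handles[MAX_UVD_HANDLES];
	struct uvd_inst_model inst[MAX_UVD_INSTANCES];
};

static int uvd_model_init(struct uvd_model *uvd, size_t vcpu_size)
{
	unsigned int i, j;

	for (j = 0; j < uvd->num_inst; j++) {
		if (uvd->harvest_config & (1u << j))
			continue;               /* harvested instance: no backing buffer */
		uvd->inst[j].vcpu_buf = calloc(1, vcpu_size);
		if (!uvd->inst[j].vcpu_buf)
			return -1;
		uvd->inst[j].vcpu_size = vcpu_size;
	}

	/* every session handle starts out free and unowned */
	for (i = 0; i < uvd->max_handles; ++i)
		uvd->handles[i] = 0;

	return 0;
}

int main(void)
{
	struct uvd_model uvd = {
		.num_inst = 2,
		.harvest_config = 1u << 1,      /* pretend instance 1 is harvested */
		.max_handles = MAX_UVD_HANDLES,
	};

	if (uvd_model_init(&uvd, 4096) == 0)
		printf("instance 0 buffer: %p, instance 1 buffer: %p\n",
		       uvd.inst[0].vcpu_buf, uvd.inst[1].vcpu_buf);
	return 0;
}

The teardown fragments from amdgpu_uvd_sw_fini() mirror this loop in reverse, again gated on the same harvest_config bitmask.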
372 void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); in amdgpu_uvd_sw_fini()
375 drm_sched_entity_destroy(&adev->uvd.entity); in amdgpu_uvd_sw_fini()
377 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_sw_fini()
378 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_fini()
380 kvfree(adev->uvd.inst[j].saved_bo); in amdgpu_uvd_sw_fini()
382 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_fini()
383 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_fini()
384 (void **)&adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_fini()
386 amdgpu_ring_fini(&adev->uvd.inst[j].ring); in amdgpu_uvd_sw_fini()
389 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in amdgpu_uvd_sw_fini()
391 amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); in amdgpu_uvd_sw_fini()
392 amdgpu_ucode_release(&adev->uvd.fw); in amdgpu_uvd_sw_fini()
407 if (ring == &adev->uvd.inst[0].ring) { in amdgpu_uvd_entity_init()
411 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_uvd_entity_init()
428 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_prepare_suspend()
432 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_prepare_suspend()
433 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_prepare_suspend()
436 if (i == adev->uvd.max_handles) in amdgpu_uvd_prepare_suspend()
440 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_prepare_suspend()
441 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_prepare_suspend()
443 if (adev->uvd.inst[j].vcpu_bo == NULL) in amdgpu_uvd_prepare_suspend()
446 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); in amdgpu_uvd_prepare_suspend()
447 ptr = adev->uvd.inst[j].cpu_addr; in amdgpu_uvd_prepare_suspend()
449 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_uvd_prepare_suspend()
450 if (!adev->uvd.inst[j].saved_bo) in amdgpu_uvd_prepare_suspend()
456 memset(adev->uvd.inst[j].saved_bo, 0, size); in amdgpu_uvd_prepare_suspend()
458 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); in amdgpu_uvd_prepare_suspend()
481 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in amdgpu_uvd_resume()
482 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_resume()
484 if (adev->uvd.inst[i].vcpu_bo == NULL) in amdgpu_uvd_resume()
487 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); in amdgpu_uvd_resume()
488 ptr = adev->uvd.inst[i].cpu_addr; in amdgpu_uvd_resume()
490 if (adev->uvd.inst[i].saved_bo != NULL) { in amdgpu_uvd_resume()
492 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); in amdgpu_uvd_resume()
495 kvfree(adev->uvd.inst[i].saved_bo); in amdgpu_uvd_resume()
496 adev->uvd.inst[i].saved_bo = NULL; in amdgpu_uvd_resume()
501 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
505 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
514 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); in amdgpu_uvd_resume()
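
The amdgpu_uvd_prepare_suspend() and amdgpu_uvd_resume() fragments pair up around a shadow copy: suspend kvmalloc()s saved_bo and copies the live VCPU BO into it with memcpy_fromio(), resume copies it back with memcpy_toio() and frees the shadow, falling back to re-copying the firmware image when no shadow exists. The sketch below models only that round trip in plain C, assuming memcpy() can stand in for the IO copies; none of the helper names are the kernel's.

/*
 * Userspace model of the suspend/resume shadow-copy round trip.
 */
#include <stdlib.h>
#include <string.h>

struct uvd_inst_shadow {
	void *cpu_addr;   /* live VCPU buffer */
	size_t size;
	void *saved_bo;   /* shadow copy kept across suspend */
};

static int uvd_shadow_suspend(struct uvd_inst_shadow *inst)
{
	if (!inst->cpu_addr)
		return 0;                       /* nothing mapped, nothing to save */

	inst->saved_bo = malloc(inst->size);
	if (!inst->saved_bo)
		return -1;

	memcpy(inst->saved_bo, inst->cpu_addr, inst->size);
	return 0;
}

static void uvd_shadow_resume(struct uvd_inst_shadow *inst)
{
	if (!inst->saved_bo)
		return;                         /* cold-boot path: reload firmware instead */

	memcpy(inst->cpu_addr, inst->saved_bo, inst->size);
	free(inst->saved_bo);
	inst->saved_bo = NULL;
}

int main(void)
{
	char live[8] = "UVD fw";
	struct uvd_inst_shadow inst = { .cpu_addr = live, .size = sizeof(live) };

	if (uvd_shadow_suspend(&inst) == 0) {
		memset(live, 0, sizeof(live));  /* pretend the block lost power */
		uvd_shadow_resume(&inst);
	}
	return live[0] == 'U' ? 0 : 1;          /* contents survived the round trip */
}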
522 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_free_handles()
525 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
526 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
528 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
541 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
542 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
594 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
754 if (!adev->uvd.use_ctx_buf) { in amdgpu_uvd_cs_msg_decode()
802 adev->uvd.decode_image_width = width; in amdgpu_uvd_cs_msg_decode()
853 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
854 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
860 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
861 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
877 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
878 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
879 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
892 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
893 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
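
The amdgpu_uvd_cs_msg() fragments implement session-handle bookkeeping on the same adev->uvd.handles[]/filp[] arrays that amdgpu_uvd_free_handles() and amdgpu_uvd_used_handles() walk: a create message claims the first free slot with atomic_cmpxchg(), a decode message verifies the handle exists and belongs to the submitting filp, and a destroy message clears whichever slot holds it. Below is a self-contained model of that scheme using C11 atomics; the array size and the owner token are assumptions made for the example.

/*
 * Model of the session-handle table: claim with CAS, check ownership,
 * release with CAS. C11 atomics stand in for the kernel's atomic_t.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];
static void *owners[MAX_HANDLES];

/* claim a free slot for @handle on behalf of @owner (create message) */
static int handle_create(uint32_t handle, void *owner)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;              /* handle already in use */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle)) {
			owners[i] = owner;
			return 0;
		}
	}
	return -1;                              /* no free slot */
}

/* verify @handle exists and is owned by @owner (decode message) */
static int handle_check(uint32_t handle, void *owner)
{
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return owners[i] == owner ? 0 : -1;
	return -1;
}

/* release @handle regardless of which slot holds it (destroy message) */
static void handle_destroy(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}

int main(void)
{
	int me;

	printf("create: %d\n", handle_create(0x42, &me));  /* 0  */
	printf("check:  %d\n", handle_check(0x42, &me));   /* 0  */
	handle_destroy(0x42);
	printf("check:  %d\n", handle_check(0x42, &me));   /* -1 */
	return 0;
}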
959 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
967 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
1105 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
1136 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1204 struct amdgpu_bo *bo = adev->uvd.ib_bo; in amdgpu_uvd_get_create_msg()
1237 bo = adev->uvd.ib_bo; in amdgpu_uvd_get_destroy_msg()
1264 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1267 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in amdgpu_uvd_idle_work_handler()
1268 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_idle_work_handler()
1270 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); in amdgpu_uvd_idle_work_handler()
1271 for (j = 0; j < adev->uvd.num_enc_rings; ++j) in amdgpu_uvd_idle_work_handler()
1272 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); in amdgpu_uvd_idle_work_handler()
1287 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
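
The amdgpu_uvd_idle_work_handler() fragments sum amdgpu_fence_count_emitted() over the decode ring and every encode ring of each non-harvested instance, power the block down when the sum is zero, and otherwise re-arm the delayed work with UVD_IDLE_TIMEOUT (the schedule_delayed_work() call above). The fragment below models only that counting decision; the structures and constants are invented for the sketch.

/*
 * Model of the idle decision: any outstanding fence on any ring of
 * any non-harvested instance keeps the block powered.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_INST      2
#define NUM_ENC_RINGS 2

struct inst_fences {
	unsigned int ring;                    /* pending decode fences */
	unsigned int ring_enc[NUM_ENC_RINGS]; /* pending encode fences */
};

static bool uvd_should_power_down(const struct inst_fences *inst,
				  unsigned int num_inst,
				  uint32_t harvest_config)
{
	unsigned int fences = 0;

	for (unsigned int i = 0; i < num_inst; ++i) {
		if (harvest_config & (1u << i))
			continue;             /* harvested: no rings to check */
		fences += inst[i].ring;
		for (unsigned int j = 0; j < NUM_ENC_RINGS; ++j)
			fences += inst[i].ring_enc[j];
	}
	return fences == 0;
}

int main(void)
{
	struct inst_fences busy[NUM_INST] = { { .ring = 1 } };
	struct inst_fences idle[NUM_INST] = { { 0 } };

	printf("busy -> power down? %d\n", uvd_should_power_down(busy, NUM_INST, 0));
	printf("idle -> power down? %d\n", uvd_should_power_down(idle, NUM_INST, 0));
	return 0;
}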
1299 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1316 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
1371 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1377 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()