Lines Matching refs:uvd

72 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
137 rdev->uvd.fw_header_present = false;
138 rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
153 rdev->uvd.fw_header_present = true;
168 rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
188 RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
191 NULL, &rdev->uvd.vcpu_bo);
197 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
199 radeon_bo_unref(&rdev->uvd.vcpu_bo);
204 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
205 &rdev->uvd.gpu_addr);
207 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
208 radeon_bo_unref(&rdev->uvd.vcpu_bo);
213 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
219 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
221 for (i = 0; i < rdev->uvd.max_handles; ++i) {
222 atomic_set(&rdev->uvd.handles[i], 0);
223 rdev->uvd.filp[i] = NULL;
224 rdev->uvd.img_size[i] = 0;
234 if (rdev->uvd.vcpu_bo == NULL)
237 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
239 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
240 radeon_bo_unpin(rdev->uvd.vcpu_bo);
241 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
244 radeon_bo_unref(&rdev->uvd.vcpu_bo);
255 if (rdev->uvd.vcpu_bo == NULL)
258 for (i = 0; i < rdev->uvd.max_handles; ++i) {
259 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
275 rdev->uvd.filp[i] = NULL;
276 atomic_set(&rdev->uvd.handles[i], 0);
288 if (rdev->uvd.vcpu_bo == NULL)
291 memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
293 size = radeon_bo_size(rdev->uvd.vcpu_bo);
296 ptr = rdev->uvd.cpu_addr;
332 for (i = 0; i < rdev->uvd.max_handles; ++i) {
333 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
334 if (handle != 0 && rdev->uvd.filp[i] == filp) {
349 rdev->uvd.filp[i] = NULL;
350 atomic_set(&rdev->uvd.handles[i], 0);
507 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
508 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
513 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
514 p->rdev->uvd.filp[i] = p->filp;
515 p->rdev->uvd.img_size[i] = img_size;
533 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
534 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
535 if (p->rdev->uvd.filp[i] != p->filp) {
548 for (i = 0; i < p->rdev->uvd.max_handles; ++i)
549 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
614 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
770 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
773 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
774 uint64_t addr = rdev->uvd.gpu_addr + offs;
778 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
798 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
806 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
809 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
810 uint64_t addr = rdev->uvd.gpu_addr + offs;
814 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
827 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
848 for (i = 0; i < rdev->uvd.max_handles; ++i) {
849 if (!atomic_read(&rdev->uvd.handles[i]))
852 if (rdev->uvd.img_size[i] >= 720*576)
862 container_of(work, struct radeon_device, uvd.idle_work.work);
873 schedule_delayed_work(&rdev->uvd.idle_work,
881 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
882 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,