Lines Matching +full:reference +full:- +full:div +full:- +full:factor

15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
72 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
74 switch (rdev->family) {
134 return -EINVAL;
137 rdev->uvd.fw_header_present = false;
138 rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
141 r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
143 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
146 struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
149 r = radeon_ucode_validate(rdev->uvd_fw);
153 rdev->uvd.fw_header_present = true;
155 family_id = (__force u32)(hdr->ucode_version) & 0xff;
156 version_major = (le32_to_cpu((__force __le32)(hdr->ucode_version))
158 version_minor = (le32_to_cpu((__force __le32)(hdr->ucode_version))
168 rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
178 r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
180 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
186 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
188 RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
191 NULL, &rdev->uvd.vcpu_bo);
193 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
197 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
199 radeon_bo_unref(&rdev->uvd.vcpu_bo);
200 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
204 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
205 &rdev->uvd.gpu_addr);
207 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
208 radeon_bo_unref(&rdev->uvd.vcpu_bo);
209 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
213 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
215 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
219 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
221 for (i = 0; i < rdev->uvd.max_handles; ++i) {
222 atomic_set(&rdev->uvd.handles[i], 0);
223 rdev->uvd.filp[i] = NULL;
224 rdev->uvd.img_size[i] = 0;
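
The block above (186-224) is the standard radeon buffer bring-up: create a VCPU buffer sized for the firmware image plus one RADEON_UVD_SESSION_SIZE slot per supported handle, reserve it, pin it into VRAM, kmap it for CPU access, then unreserve. The loop at 221-224 zeroes the per-handle bookkeeping (atomic handle id, owning filp, image size) that the command-stream parser uses later.
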
234 if (rdev->uvd.vcpu_bo == NULL)
237 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
239 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
240 radeon_bo_unpin(rdev->uvd.vcpu_bo);
241 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
244 radeon_bo_unref(&rdev->uvd.vcpu_bo);
246 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
248 release_firmware(rdev->uvd_fw);
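
Teardown unwinds the same steps in reverse: kunmap, unpin, unreserve, drop the buffer reference, tear down the UVD ring, and finally release the firmware blob.
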
255 if (rdev->uvd.vcpu_bo == NULL)
258 for (i = 0; i < rdev->uvd.max_handles; ++i) {
259 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
275 rdev->uvd.filp[i] = NULL;
276 atomic_set(&rdev->uvd.handles[i], 0);
288 if (rdev->uvd.vcpu_bo == NULL)
289 return -EINVAL;
291 memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
293 size = radeon_bo_size(rdev->uvd.vcpu_bo);
294 size -= rdev->uvd_fw->size;
296 ptr = rdev->uvd.cpu_addr;
297 ptr += rdev->uvd_fw->size;
309 for (i = 0; i < rbo->placement.num_placement; ++i) {
310 rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
311 rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
319 if (rbo->placement.num_placement > 1)
323 rbo->placements[1] = rbo->placements[0];
324 rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
325 rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
326 rbo->placement.num_placement++;
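
The placement code confines UVD buffers to 256 MiB windows expressed in page frames: with the common 4 KiB page size (PAGE_SHIFT = 12), (256 * 1024 * 1024) >> PAGE_SHIFT is 65536, so the first placement covers frames 0-65535, and the fallback second placement, added only when the object does not already have multiple placements (see the check at 319), covers the next 65536 frames. The `0 >> PAGE_SHIFT` at 310 is simply an explicit zero, written to mirror the limit calculation.
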
332 for (i = 0; i < rdev->uvd.max_handles; ++i) {
333 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
334 if (handle != 0 && rdev->uvd.filp[i] == filp) {
349 rdev->uvd.filp[i] = NULL;
350 atomic_set(&rdev->uvd.handles[i], 0);
375 /* reference picture buffer */
387 /* reference picture buffer */
406 /* reference picture buffer */
412 /* reference picture buffer */
424 return -EINVAL;
429 return -EINVAL;
435 return -EINVAL;
455 if (p->rdev->family >= CHIP_PALM)
462 return -EINVAL;
476 return -EINVAL;
493 return -EINVAL;
507 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
508 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
510 return -EINVAL;
513 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
514 p->rdev->uvd.filp[i] = p->filp;
515 p->rdev->uvd.img_size[i] = img_size;
521 return -EINVAL;
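
The allocation path at 507-521 claims a stream slot without taking a lock: the scan rejects a duplicate handle, and atomic_cmpxchg turns a zero slot into the new handle, so only one racing claimant can win it; the owner (filp) and image size are recorded only after the claim succeeds (514-515). The destroy path at 548-549 releases a slot the same way, cmpxchg'ing the handle back to zero. A minimal standalone analogue using C11 atomics (the table and its size are hypothetical stand-ins, not the kernel's types):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_HANDLES 10

    static _Atomic unsigned handles[MAX_HANDLES];

    /* Claim a slot for `handle`; returns the slot index, or -1 on
     * duplicate/full, mirroring the -EINVAL paths above. */
    static int claim_slot(unsigned handle)
    {
        for (int i = 0; i < MAX_HANDLES; ++i) {
            if (atomic_load(&handles[i]) == handle)
                return -1;          /* handle already in use */
            unsigned expected = 0;
            /* only one thread can swap 0 -> handle in a given slot */
            if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
                return i;
        }
        return -1;                  /* table full */
    }

    int main(void)
    {
        printf("%d\n", claim_slot(42));  /* 0: first free slot */
        printf("%d\n", claim_slot(42));  /* -1: duplicate rejected */
        return 0;
    }
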
533 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
534 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
535 if (p->rdev->uvd.filp[i] != p->filp) {
537 return -EINVAL;
544 return -ENOENT;
548 for (i = 0; i < p->rdev->uvd.max_handles; ++i)
549 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
558 return -EINVAL;
571 relocs_chunk = p->chunk_relocs;
574 if (idx >= relocs_chunk->length_dw) {
576 idx, relocs_chunk->length_dw);
577 return -EINVAL;
580 reloc = &p->relocs[(idx / 4)];
581 start = reloc->gpu_offset;
582 end = start + radeon_bo_size(reloc->robj);
585 p->ib.ptr[data0] = start & 0xFFFFFFFF;
586 p->ib.ptr[data1] = start >> 32;
588 cmd = radeon_get_ib_value(p, p->idx) >> 1;
593 return -EINVAL;
595 if ((end - start) < buf_sizes[cmd]) {
597 (unsigned)(end - start), buf_sizes[cmd]);
598 return -EINVAL;
603 return -EINVAL;
606 if ((start >> 28) != ((end - 1) >> 28)) {
607 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
609 return -EINVAL;
614 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
615 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
617 return -EINVAL;
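
Both checks rest on the same arithmetic: 2^28 bytes is exactly 256 MiB, so shifting an address right by 28 gives its 256 MiB segment index. A relocation is rejected if start and end - 1 shift to different values (the buffer would straddle a segment boundary), and message/feedback buffers must additionally land in the same segment as rdev->uvd.gpu_addr.
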
622 DRM_ERROR("More than one message in a UVD-IB!\n");
623 return -EINVAL;
626 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
631 return -EINVAL;
645 p->idx++;
646 for (i = 0; i <= pkt->count; ++i) {
647 switch (pkt->reg + i*4) {
649 *data0 = p->idx;
652 *data1 = p->idx;
665 pkt->reg + i*4);
666 return -EINVAL;
668 p->idx++;
689 if (p->chunk_ib->length_dw % 16) {
691 p->chunk_ib->length_dw);
692 return -EINVAL;
695 if (p->chunk_relocs == NULL) {
697 return -EINVAL;
702 r = radeon_cs_packet_parse(p, &pkt, p->idx);
713 p->idx += pkt.count + 2;
717 return -EINVAL;
719 } while (p->idx < p->chunk_ib->length_dw);
722 DRM_ERROR("UVD-IBs need a msg command!\n");
723 return -EINVAL;
770 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
773 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
774 uint64_t addr = rdev->uvd.gpu_addr + offs;
778 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
798 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
806 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
809 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
810 uint64_t addr = rdev->uvd.gpu_addr + offs;
814 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
827 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
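
These two near-identical blocks (likely the create- and destroy-message helpers) stage their message in place: offs points at scratch space at the tail of the VCPU buffer, and the same offset is applied both to the CPU mapping (msg, written through an __iomem pointer) and to the GPU address handed to the ring, so no separate allocation is needed.
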
832 * radeon_uvd_count_handles - count number of open streams
848 for (i = 0; i < rdev->uvd.max_handles; ++i) {
849 if (!atomic_read(&rdev->uvd.handles[i]))
852 if (rdev->uvd.img_size[i] >= 720*576)
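
Only the loop header and the HD threshold survive the match, but the shape of radeon_uvd_count_handles is clear: skip empty slots, then classify each open stream as SD or HD by comparing its image size against 720*576 (a PAL-sized frame). A standalone sketch of that classification (the struct and MAX_HANDLES here are hypothetical stand-ins, not the kernel's types):

    #define MAX_HANDLES   10
    #define HD_THRESHOLD  (720 * 576)   /* PAL-sized frames and up count as HD */

    struct uvd_state {
        unsigned handles[MAX_HANDLES];  /* 0 = slot unused */
        unsigned img_size[MAX_HANDLES]; /* width * height of the stream */
    };

    static void count_handles(const struct uvd_state *uvd,
                              unsigned *sd, unsigned *hd)
    {
        *sd = *hd = 0;
        for (unsigned i = 0; i < MAX_HANDLES; ++i) {
            if (!uvd->handles[i])
                continue;                       /* empty slot, as at 849 */
            if (uvd->img_size[i] >= HD_THRESHOLD)
                ++*hd;                          /* the threshold at 852 */
            else
                ++*sd;
        }
    }

The counts flow into the DPM code at 866-867, which records them in rdev->pm.dpm.sd and rdev->pm.dpm.hd.
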
865 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
866 radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
867 &rdev->pm.dpm.hd);
873 schedule_delayed_work(&rdev->uvd.idle_work,
881 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
882 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
885 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
888 if ((rdev->pm.dpm.sd != sd) ||
889 (rdev->pm.dpm.hd != hd)) {
890 rdev->pm.dpm.sd = sd;
891 rdev->pm.dpm.hd = hd;
898 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
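
The pair at 881-882 is a common delayed-work idiom: cancel_delayed_work_sync returns true when the idle work was still pending, so set_clocks ends up true exactly when the idle handler had already run and dropped the UVD clocks; re-scheduling then re-arms the idle timeout for the new burst of activity.
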
929 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
936 * @fb_factor: factor to multiply vco freq with
945  * Calculate dividers for the UVD's UPLL (R6xx-SI, except APUs).
946  * Returns zero on success, -EINVAL on error.
958 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
972 /* fb div out of range? */
991 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
1006 return -EINVAL;
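
The kernel-doc above and the score line at 991 outline the search: sweep candidate VCO frequencies derived from the SPLL reference clock (958), pick a post-divider for each output so the divided clock stays at or below its target (the score terms only make sense as non-negative shortfalls), and keep the combination with the smallest total shortfall, stopping early on an exact hit. A simplified standalone sketch of that scoring loop (the VCO range and target clocks are made-up examples, and the feedback-divider masks, even/odd divider constraints, and fb_factor scaling are all omitted):

    #include <stdio.h>
    #include <limits.h>

    /* Smallest post divider that keeps vco/div at or below target. */
    static unsigned post_div_for(unsigned vco, unsigned target)
    {
        unsigned div = vco / target;
        if (div == 0)
            div = 1;
        if (vco / div > target)
            ++div;
        return div;
    }

    int main(void)
    {
        const unsigned vclk = 53300, dclk = 40000;  /* kHz, example targets */
        unsigned best_vco = 0, best_score = UINT_MAX;

        for (unsigned vco = 600000; vco <= 1200000; vco += 100) {
            unsigned vdiv = post_div_for(vco, vclk);
            unsigned ddiv = post_div_for(vco, dclk);
            /* both shortfalls are non-negative by construction */
            unsigned score = (vclk - vco / vdiv) + (dclk - vco / ddiv);
            if (score < best_score) {
                best_score = score;
                best_vco = vco;
                if (score == 0)
                    break;          /* exact hit, can't improve */
            }
        }
        printf("best vco %u kHz, score %u\n", best_vco, best_score);
        return 0;
    }
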
1037 return -ETIMEDOUT;