Lines Matching +full:input +full:- +full:depth
1 // SPDX-License-Identifier: GPL-2.0-only
13 #include <linux/dma-mapping.h>
26 #define VPDMA_FIRMWARE "vpdma-1b8.bin"
32 .depth = 8,
37 .depth = 8,
42 .depth = 8,
47 .depth = 8,
52 .depth = 8,
57 .depth = 4,
62 .depth = 4,
67 .depth = 16,
72 .depth = 24,
77 .depth = 16,
82 .depth = 16,
87 .depth = 16,
96 .depth = 16,
101 .depth = 16,
106 .depth = 16,
111 .depth = 16,
116 .depth = 16,
121 .depth = 24,
126 .depth = 24,
131 .depth = 32,
136 .depth = 24,
141 .depth = 32,
146 .depth = 16,
151 .depth = 16,
156 .depth = 16,
161 .depth = 16,
166 .depth = 16,
171 .depth = 24,
176 .depth = 24,
181 .depth = 32,
186 .depth = 24,
191 .depth = 32,
197  * To handle RAW formats we re-use the CBY422
198  * VPDMA data type so that the VPDMA re-orders
209 .depth = 8,
214 .depth = 16,
223 .depth = 4,
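The raw-format entries at file lines 209 and 214 put the comment above into practice. A minimal sketch of what one full table entry plausibly looks like, assuming the struct vpdma_data_format field names used throughout this file; DATA_TYPE_CBY422 and VPDMA_DATA_FMT_TYPE_MISC are assumed constant names, since only the .depth initializers are matched above:

/* Sketch only: a raw entry re-using the CBY422 VPDMA data type so the
 * hardware re-orders the incoming bytes. DATA_TYPE_CBY422 and
 * VPDMA_DATA_FMT_TYPE_MISC are assumptions; only the .depth lines
 * appear in this listing.
 */
static const struct vpdma_data_format raw8_fmt_sketch = {
	.type		= VPDMA_DATA_FMT_TYPE_MISC,
	.data_type	= DATA_TYPE_CBY422,	/* re-ordered by the VPDMA */
	.depth		= 8,			/* 8-bit raw samples */
};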
282 return ioread32(vpdma->base + offset);
287 iowrite32(value, vpdma->base + offset);
309 struct device *dev = &vpdma->pdev->dev;
311 #define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))
365 buf->size = size;
366 buf->mapped = false;
367 buf->addr = kzalloc(size, GFP_KERNEL);
368 if (!buf->addr)
369 return -ENOMEM;
371 WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);
379 WARN_ON(buf->mapped);
380 kfree(buf->addr);
381 buf->addr = NULL;
382 buf->size = 0;
391 struct device *dev = &vpdma->pdev->dev;
393 WARN_ON(buf->mapped);
394 buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
396 if (dma_mapping_error(dev, buf->dma_addr)) {
398 return -EINVAL;
401 buf->mapped = true;
413 struct device *dev = &vpdma->pdev->dev;
415 if (buf->mapped)
416 dma_unmap_single(dev, buf->dma_addr, buf->size,
419 buf->mapped = false;
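The four fragments above (alloc at 365-371, free at 379-382, map at 391-401, unmap at 413-419) form a single descriptor-buffer lifecycle. A hedged usage sketch; vpdma_map_desc_buf() and vpdma_unmap_desc_buf() are assumed names for the map/unmap bodies, while vpdma_alloc_desc_buf() and vpdma_free_desc_buf() are named later in this listing:

/* Lifecycle sketch; vpdma_map_desc_buf()/vpdma_unmap_desc_buf() are
 * assumed names for the map/unmap helpers whose bodies appear above.
 */
struct vpdma_buf buf;
int r;

r = vpdma_alloc_desc_buf(&buf, SZ_4K);	/* kzalloc-backed, starts unmapped */
if (r)
	return r;

r = vpdma_map_desc_buf(vpdma, &buf);	/* dma_map_single(), sets buf.mapped */
if (r) {
	vpdma_free_desc_buf(&buf);
	return r;
}

/* ... write descriptors, submit the list, wait for completion ... */

vpdma_unmap_desc_buf(vpdma, &buf);	/* dma_unmap_single(), clears buf.mapped */
vpdma_free_desc_buf(&buf);		/* WARN_ON(buf->mapped) guards against leaks */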
458 while (vpdma_list_busy(vpdma, list_num) && --timeout)
462 dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
463 ret = -EBUSY;
485 r = vpdma_alloc_desc_buf(&list->buf, size);
489 list->next = list->buf.addr;
491 list->type = type;
503 list->next = list->buf.addr;
513 vpdma_free_desc_buf(&list->buf);
515 list->next = NULL;
535 return -EBUSY;
537 /* 16-byte granularity */
538 list_size = (list->next - list->buf.addr) >> 4;
540 spin_lock_irqsave(&vpdma->lock, flags);
541 write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
545 (list->type << VPDMA_LIST_TYPE_SHFT) |
547 spin_unlock_irqrestore(&vpdma->lock, flags);
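The `>> 4` at file line 538 converts the list's byte length into the 16-byte units the list attribute register expects. A worked example, inferring a 32-byte descriptor from the eight 32-bit words visible in the dtd fragments later in this listing:

/* Worked example (sketch): four data transfer descriptors at 32 bytes
 * each (eight 32-bit words, per the dtd fields shown below) occupy
 * 128 bytes, so the size field is 128 >> 4 = 8.
 */
u32 nr_descs   = 4;
u32 list_bytes = nr_descs * 32;		/* bytes consumed in list->buf */
u32 list_size  = list_bytes >> 4;	/* 8, in 16-byte granularity */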
562 write_field_reg(vpdma, reg_addr, width - 1,
565 write_field_reg(vpdma, reg_addr, height - 1,
583 cfd->dest_addr_offset);
586 pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
588 pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
605 int len = blk->size;
607 WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
609 cfd = list->next;
610 WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
612 cfd->dest_addr_offset = dest_offset;
613 cfd->block_len = len;
614 cfd->payload_addr = (u32) blk->dma_addr;
615 cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
618 list->next = cfd + 1;
633 unsigned int len = adb->size;
636 WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
638 cfd = list->next;
639 WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
641 cfd->w0 = 0;
642 cfd->w1 = 0;
643 cfd->payload_addr = (u32) adb->dma_addr;
644 cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
647 list->next = cfd + 1;
676 ctd = list->next;
677 WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
679 ctd->w0 = 0;
680 ctd->w1 = 0;
681 ctd->w2 = 0;
682 ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
685 list->next = ctd + 1;
701 ctd = list->next;
702 WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
704 ctd->w0 = 0;
705 ctd->w1 = 0;
706 ctd->w2 = 0;
707 ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
710 list->next = ctd + 1;
735 pr_debug("word2: start_addr = %x\n", dtd->start_addr);
757 pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
758 pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
795 int depth = fmt->depth;
800 if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
801 (fmt->data_type == DATA_TYPE_C420 ||
802 fmt->data_type == DATA_TYPE_CB420)) {
805 depth = 8;
808 dma_addr += rect.top * stride + (rect.left * depth >> 3);
810 dtd = list->next;
811 WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
813 dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
820 dtd->w1 = 0;
821 dtd->start_addr = (u32) dma_addr;
822 dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
824 dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
825 dtd->max_width_height = dtd_max_width_height(max_w, max_h);
826 dtd->client_attr0 = 0;
827 dtd->client_attr1 = 0;
829 list->next = dtd + 1;
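The pointer arithmetic at file line 808 relocates the DMA start address to the crop origin: rect.top full rows of stride bytes, plus rect.left pixels scaled from bits to bytes by the per-pixel depth. A worked example with illustrative numbers (16-bpp packed pixels, 2048-byte stride, crop at left=100, top=50):

/* Sketch with made-up numbers: 16-bpp pixels, stride 2048 bytes,
 * crop origin (left=100, top=50).
 */
u32 stride = 2048, depth = 16;
u32 top = 50, left = 100;
u32 offset = top * stride + ((left * depth) >> 3);
/* = 50 * 2048 + (100 * 16) / 8 = 102400 + 200 = 102600 bytes */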
841 * @c_rect: crop params of input image
845  * @field: top or bottom field info of the input image
862 int depth = fmt->depth;
869 if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
870 (fmt->data_type == DATA_TYPE_C420 ||
871 fmt->data_type == DATA_TYPE_CB420)) {
874 depth = 8;
877 dma_addr += rect.top * stride + (rect.left * depth >> 3);
879 dtd = list->next;
880 WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
882 dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
890 dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
892 dtd->start_addr = (u32) dma_addr;
893 dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
895 dtd->frame_width_height = dtd_frame_width_height(frame_width,
897 dtd->start_h_v = dtd_start_h_v(start_h, start_v);
898 dtd->client_attr0 = 0;
899 dtd->client_attr1 = 0;
901 list->next = dtd + 1;
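For the 4:2:0 chroma data types at file lines 869-871, the same computation runs with depth forced to 8; the unmatched lines in that gap presumably also halve rect.top and rect.height for the half-height chroma plane (an assumption, since only `depth = 8` is visible here). Continuing the example above for the chroma plane:

/* Sketch: chroma plane of the same crop, assuming rect.top is halved
 * in the unmatched lines (only "depth = 8" is matched above).
 */
u32 stride = 2048, depth = 8;
u32 top = 50 >> 1, left = 100;
u32 offset = top * stride + ((left * depth) >> 3);
/* = 25 * 2048 + (100 * 8) / 8 = 51200 + 100 = 51300 bytes */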
909 int i, list_num = -1;
912 spin_lock_irqsave(&vpdma->lock, flags);
913 for (i = 0; i < VPDMA_MAX_NUM_LIST && vpdma->hwlist_used[i]; i++)
918 vpdma->hwlist_used[i] = true;
919 vpdma->hwlist_priv[i] = priv;
921 spin_unlock_irqrestore(&vpdma->lock, flags);
932 return vpdma->hwlist_priv[list_num];
941 spin_lock_irqsave(&vpdma->lock, flags);
942 vpdma->hwlist_used[list_num] = false;
943 priv = vpdma->hwlist_priv[list_num];
944 spin_unlock_irqrestore(&vpdma->lock, flags);
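The hwlist fragments (grab at 909-921, lookup at 932, release at 941-944) amount to a lock-protected slot allocator over VPDMA_MAX_NUM_LIST entries. A hedged usage sketch; vpdma_hwlist_alloc(), vpdma_hwlist_get_priv() and vpdma_hwlist_release() are assumed names, since only the bodies are matched:

/* Slot allocator usage sketch; function names are assumptions and
 * ctx stands in for the caller's private context pointer.
 */
void *priv;
int list_num;

list_num = vpdma_hwlist_alloc(vpdma, ctx);	/* returns -1 when all slots used */
if (list_num < 0)
	return -EBUSY;

/* e.g. in the list-complete interrupt: map the list back to its context */
priv = vpdma_hwlist_get_priv(vpdma, list_num);

/* done with the hardware list: free the slot, recover the context */
priv = vpdma_hwlist_release(vpdma, list_num);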
997 if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
999 else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
1040 dev_dbg(&vpdma->pdev->dev, "firmware callback\n");
1042 if (!f || !f->data) {
1043 dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
1050 vpdma->cb(vpdma->pdev);
1054 r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
1056 dev_err(&vpdma->pdev->dev,
1061 memcpy(fw_dma_buf.addr, f->data, f->size);
1076 dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
1080 vpdma->cb(vpdma->pdev);
1093 struct device *dev = &vpdma->pdev->dev;
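The firmware callback fragments at file lines 1040-1080 run asynchronously once the blob named by VPDMA_FIRMWARE arrives; the request side uses the stock linux/firmware.h API. A sketch of how that request is typically issued; the function and callback names here (vpdma_load_firmware, vpdma_firmware_cb) are assumptions, since neither appears among the matched lines:

/* Sketch: asynchronous firmware request for VPDMA_FIRMWARE
 * ("vpdma-1b8.bin", file line 26). vpdma_firmware_cb is the assumed
 * name of the callback whose fragments appear above.
 */
static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1, VPDMA_FIRMWARE, dev,
				    GFP_KERNEL, vpdma, vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}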
1114 dev_dbg(&pdev->dev, "vpdma_create\n");
1116 vpdma->pdev = pdev;
1117 vpdma->cb = cb;
1118 spin_lock_init(&vpdma->lock);
1122 dev_err(&pdev->dev, "missing platform resources data\n");
1123 return -ENODEV;
1126 vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1127 if (!vpdma->base) {
1128 dev_err(&pdev->dev, "failed to ioremap\n");
1129 return -ENOMEM;