// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
 * Author: Jacob Chen <jacob-chen@iotwrt.com>
 */

#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>

#include "rga-hw.h"
#include "rga.h"

/*
 * Build the RGA MMU page table for a DMA-mapped buffer: one descriptor
 * per page, holding the low 32 bits of the page's DMA address.
 * Returns the number of descriptors written, or -EINVAL if the buffer
 * needs more descriptors than the table can hold.
 */
static ssize_t fill_descriptors(struct rga_dma_desc *desc, size_t max_desc,
				struct sg_table *sgt)
{
	struct sg_dma_page_iter iter;
	struct rga_dma_desc *tmp = desc;
	size_t n_desc = 0;
	dma_addr_t addr;

	for_each_sgtable_dma_page(sgt, &iter, 0) {
		/* >= so we never write one entry past the end of the table */
		if (n_desc >= max_desc)
			return -EINVAL;
		addr = sg_page_iter_dma_address(&iter);
		tmp->addr = lower_32_bits(addr);
		tmp++;
		n_desc++;
	}

	return n_desc;
}

static int
rga_queue_setup(struct vb2_queue *vq,
		unsigned int *nbuffers, unsigned int *nplanes,
		unsigned int sizes[], struct device *alloc_devs[])
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vq);
	struct rga_frame *f = rga_get_frame(ctx, vq->type);
	const struct v4l2_pix_format_mplane *pix_fmt;
	int i;

	if (IS_ERR(f))
		return PTR_ERR(f);

	pix_fmt = &f->pix;

	if (*nplanes) {
		if (*nplanes != pix_fmt->num_planes)
			return -EINVAL;

		for (i = 0; i < pix_fmt->num_planes; i++)
			if (sizes[i] < pix_fmt->plane_fmt[i].sizeimage)
				return -EINVAL;

		return 0;
	}

	*nplanes = pix_fmt->num_planes;

	for (i = 0; i < pix_fmt->num_planes; i++)
		sizes[i] = pix_fmt->plane_fmt[i].sizeimage;

	return 0;
}

/*
 * Allocate the descriptor table up front, sized for the worst case of
 * one descriptor per page of the frame.
 */
static int rga_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;
	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
	size_t n_desc;

	if (IS_ERR(f))
		return PTR_ERR(f);

	n_desc = DIV_ROUND_UP(f->size, PAGE_SIZE);

	rbuf->n_desc = n_desc;
	rbuf->dma_desc = dma_alloc_coherent(rga->dev,
					    rbuf->n_desc * sizeof(*rbuf->dma_desc),
					    &rbuf->dma_desc_pa, GFP_KERNEL);
	if (!rbuf->dma_desc)
		return -ENOMEM;

	return 0;
}

/* Byte offset of a component plane inside a single contiguous buffer. */
static int get_plane_offset(struct rga_frame *f, int plane)
{
	if (plane == 0)
		return 0;
	if (plane == 1)
		return f->width * f->height;
	if (plane == 2)
		return f->width * f->height + (f->width * f->height / f->fmt->uv_factor);

	return -EINVAL;
}

static int rga_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
	ssize_t n_desc = 0;
	size_t curr_desc = 0;
	int i;
	const struct v4l2_format_info *info;
	/* Zero-init so unused u/v offsets stay 0 for formats with fewer planes */
	unsigned int offsets[VIDEO_MAX_PLANES] = { 0 };

	if (IS_ERR(f))
		return PTR_ERR(f);

	for (i = 0; i < vb->num_planes; i++) {
		vb2_set_plane_payload(vb, i, f->pix.plane_fmt[i].sizeimage);

		/* Create local MMU table for RGA */
		n_desc = fill_descriptors(&rbuf->dma_desc[curr_desc],
					  rbuf->n_desc - curr_desc,
					  vb2_dma_sg_plane_desc(vb, i));
		if (n_desc < 0) {
			v4l2_err(&ctx->rga->v4l2_dev,
				 "Failed to map video buffer to RGA\n");
			return n_desc;
		}
		offsets[i] = curr_desc << PAGE_SHIFT;
		curr_desc += n_desc;
	}

	/* Fill the remaining planes */
	info = v4l2_format_info(f->fmt->fourcc);
	for (i = info->mem_planes; i < info->comp_planes; i++)
		offsets[i] = get_plane_offset(f, i);

	rbuf->offset.y_off = offsets[0];
	rbuf->offset.u_off = offsets[1];
	rbuf->offset.v_off = offsets[2];

	return 0;
}
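/*
 * Worked example (illustrative numbers, not taken from the driver): for
 * a hypothetical 1920x1080 frame in a 4:2:0 three-plane format with
 * uv_factor = 4, get_plane_offset() above yields
 *
 *   plane 0 (Y): 0
 *   plane 1 (U): 1920 * 1080           = 2073600 bytes
 *   plane 2 (V): 2073600 + 2073600 / 4 = 2592000 bytes
 *
 * i.e. each chroma plane is a quarter of the luma plane and follows it
 * contiguously. rga_buf_prepare() uses this arithmetic only for
 * component planes that share one memory plane (mem_planes <
 * comp_planes); planes with their own memory plane get their offset
 * from the descriptor table position (curr_desc << PAGE_SHIFT) instead.
 */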
static void rga_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static void rga_buf_cleanup(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;

	dma_free_coherent(rga->dev, rbuf->n_desc * sizeof(*rbuf->dma_desc),
			  rbuf->dma_desc, rbuf->dma_desc_pa);
}

/* Hand every queued buffer back to vb2 in the given state. */
static void rga_buf_return_buffers(struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		v4l2_m2m_buf_done(vbuf, state);
	}
}

static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;
	int ret;

	ret = pm_runtime_resume_and_get(rga->dev);
	if (ret < 0) {
		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	if (V4L2_TYPE_IS_OUTPUT(q->type))
		ctx->osequence = 0;
	else
		ctx->csequence = 0;

	return 0;
}

static void rga_buf_stop_streaming(struct vb2_queue *q)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;

	rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
	pm_runtime_put(rga->dev);
}

const struct vb2_ops rga_qops = {
	.queue_setup = rga_queue_setup,
	.buf_init = rga_buf_init,
	.buf_prepare = rga_buf_prepare,
	.buf_queue = rga_buf_queue,
	.buf_cleanup = rga_buf_cleanup,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = rga_buf_start_streaming,
	.stop_streaming = rga_buf_stop_streaming,
};
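/*
 * A minimal sketch of how rga_qops is typically wired into a vb2_queue
 * (the actual wiring lives in the driver's m2m queue_init callback; the
 * field values below are assumptions based on the ops above, not copied
 * from it). The dma-sg memops match rga_buf_prepare()'s use of
 * vb2_dma_sg_plane_desc(), and buf_struct_size must be
 * sizeof(struct rga_vb_buffer) so that vb_to_rga() is valid:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->drv_priv = ctx;
 *	q->ops = &rga_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->buf_struct_size = sizeof(struct rga_vb_buffer);
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *	ret = vb2_queue_init(q);
 */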