// /linux/drivers/staging/media/meson/vdec/vdec_helpers.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 BayLibre, SAS
 * Author: Maxime Jourdan <mjourdan@baylibre.com>
 */

#include <linux/gcd.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-dma-contig.h>

#include "vdec_helpers.h"

#define NUM_CANVAS_NV12 2
#define NUM_CANVAS_YUV420 3

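/* Relaxed MMIO accessors for the DOS and ESPARSER register banks */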
u32 amvdec_read_dos(struct amvdec_core *core, u32 reg)
{
	return readl_relaxed(core->dos_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_read_dos);

void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val)
{
	writel_relaxed(val, core->dos_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_write_dos);

void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
{
	amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) | val);
}
EXPORT_SYMBOL_GPL(amvdec_write_dos_bits);

void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
{
	amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) & ~val);
}
EXPORT_SYMBOL_GPL(amvdec_clear_dos_bits);

u32 amvdec_read_parser(struct amvdec_core *core, u32 reg)
{
	return readl_relaxed(core->esparser_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_read_parser);

void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
{
	writel_relaxed(val, core->esparser_base + reg);
}
EXPORT_SYMBOL_GPL(amvdec_write_parser);

/* 4 KiB per 64x32 block */
u32 amvdec_am21c_body_size(u32 width, u32 height)
{
	u32 width_64 = ALIGN(width, 64) / 64;
	u32 height_32 = ALIGN(height, 32) / 32;

	return SZ_4K * width_64 * height_32;
}
EXPORT_SYMBOL_GPL(amvdec_am21c_body_size);

/* 32 bytes per 128x64 block */
u32 amvdec_am21c_head_size(u32 width, u32 height)
{
	u32 width_128 = ALIGN(width, 128) / 128;
	u32 height_64 = ALIGN(height, 64) / 64;

	return 32 * width_128 * height_64;
}
EXPORT_SYMBOL_GPL(amvdec_am21c_head_size);

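/* Total AM21C buffer size: body + header, rounded up to 64 KiB */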
u32 amvdec_am21c_size(u32 width, u32 height)
{
	return ALIGN(amvdec_am21c_body_size(width, height) +
		     amvdec_am21c_head_size(width, height), SZ_64K);
}
EXPORT_SYMBOL_GPL(amvdec_am21c_size);

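/* Allocate a canvas and track it in the session's canvas_alloc list */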
static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
{
	int ret;

	if (sess->canvas_num >= MAX_CANVAS) {
		dev_err(sess->core->dev, "Reached max number of canvas\n");
		return -ENOMEM;
	}

	ret = meson_canvas_alloc(sess->core->canvas, canvas_id);
	if (ret)
		return ret;

	sess->canvas_alloc[sess->canvas_num++] = *canvas_id;
	return 0;
}

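/*
 * Map the three planes (Y, U, V) of a YUV420M capture buffer to canvases
 * and program their indices into @reg (bits 0-7: Y, 8-15: U, 16-23: V).
 */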
static int set_canvas_yuv420m(struct amvdec_session *sess,
			      struct vb2_buffer *vb, u32 width,
			      u32 height, u32 reg)
{
	struct amvdec_core *core = sess->core;
	u8 canvas_id[NUM_CANVAS_YUV420]; /* Y U V */
	dma_addr_t buf_paddr[NUM_CANVAS_YUV420]; /* Y U V */
	int ret, i;

	for (i = 0; i < NUM_CANVAS_YUV420; ++i) {
		ret = canvas_alloc(sess, &canvas_id[i]);
		if (ret)
			return ret;

		buf_paddr[i] =
		    vb2_dma_contig_plane_dma_addr(vb, i);
	}

	/* Y plane */
	meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
			    width, height, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR,
			    MESON_CANVAS_ENDIAN_SWAP64);

	/* U plane */
	meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
			    width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR,
			    MESON_CANVAS_ENDIAN_SWAP64);

	/* V plane */
	meson_canvas_config(core->canvas, canvas_id[2], buf_paddr[2],
			    width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR,
			    MESON_CANVAS_ENDIAN_SWAP64);

	amvdec_write_dos(core, reg,
			 ((canvas_id[2]) << 16) |
			 ((canvas_id[1]) << 8)  |
			 (canvas_id[0]));

	return 0;
}

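/*
 * Map the two planes (Y, U/V) of an NV12M capture buffer to canvases.
 * The chroma canvas index is programmed into both chroma slots of @reg,
 * so the register layout matches the three-plane case.
 */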
static int set_canvas_nv12m(struct amvdec_session *sess,
			    struct vb2_buffer *vb, u32 width,
			    u32 height, u32 reg)
{
	struct amvdec_core *core = sess->core;
	u8 canvas_id[NUM_CANVAS_NV12]; /* Y U/V */
	dma_addr_t buf_paddr[NUM_CANVAS_NV12]; /* Y U/V */
	int ret, i;

	for (i = 0; i < NUM_CANVAS_NV12; ++i) {
		ret = canvas_alloc(sess, &canvas_id[i]);
		if (ret)
			return ret;

		buf_paddr[i] =
		    vb2_dma_contig_plane_dma_addr(vb, i);
	}

	/* Y plane */
	meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
			    width, height, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR,
			    MESON_CANVAS_ENDIAN_SWAP64);

	/* U/V plane */
	meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
			    width, height / 2, MESON_CANVAS_WRAP_NONE,
			    MESON_CANVAS_BLKMODE_LINEAR,
			    MESON_CANVAS_ENDIAN_SWAP64);

	amvdec_write_dos(core, reg,
			 ((canvas_id[1]) << 16) |
			 ((canvas_id[1]) << 8)  |
			 (canvas_id[0]));

	return 0;
}

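/*
 * Program one canvas register per queued capture buffer, walking the
 * reg_base[]/reg_num[] register banks, and record the firmware-index to
 * vb2-index mapping in fw_idx_to_vb2_idx[].
 */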
int amvdec_set_canvases(struct amvdec_session *sess,
			u32 reg_base[], u32 reg_num[])
{
	struct v4l2_m2m_buffer *buf;
	u32 pixfmt = sess->pixfmt_cap;
	u32 width = ALIGN(sess->width, 32);
	u32 height = ALIGN(sess->height, 32);
	u32 reg_cur;
	u32 reg_num_cur = 0;
	u32 reg_base_cur = 0;
	int i = 0;
	int ret;

	v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
		if (!reg_base[reg_base_cur])
			return -EINVAL;

		reg_cur = reg_base[reg_base_cur] + reg_num_cur * 4;

		switch (pixfmt) {
		case V4L2_PIX_FMT_NV12M:
			ret = set_canvas_nv12m(sess, &buf->vb.vb2_buf, width,
					       height, reg_cur);
			if (ret)
				return ret;
			break;
		case V4L2_PIX_FMT_YUV420M:
			ret = set_canvas_yuv420m(sess, &buf->vb.vb2_buf, width,
						 height, reg_cur);
			if (ret)
				return ret;
			break;
		default:
			dev_err(sess->core->dev, "Unsupported pixfmt %08X\n",
				pixfmt);
			return -EINVAL;
		}

		reg_num_cur++;
		if (reg_num_cur >= reg_num[reg_base_cur]) {
			reg_base_cur++;
			reg_num_cur = 0;
		}

		sess->fw_idx_to_vb2_idx[i++] = buf->vb.vb2_buf.index;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amvdec_set_canvases);

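/*
 * Queue a timestamp (plus timecode, vififo offset and buffer flags) so it
 * can be matched against a decoded frame later.
 */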
int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
		  struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
{
	struct amvdec_timestamp *new_ts;
	unsigned long flags;

	new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
	if (!new_ts)
		return -ENOMEM;

	new_ts->ts = ts;
	new_ts->tc = tc;
	new_ts->offset = offset;
	new_ts->flags = vbuf_flags;

	spin_lock_irqsave(&sess->ts_spinlock, flags);
	list_add_tail(&new_ts->list, &sess->timestamps);
	spin_unlock_irqrestore(&sess->ts_spinlock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(amvdec_add_ts);

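/* Drop a previously queued timestamp without completing a capture buffer */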
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
{
	struct amvdec_timestamp *tmp;
	unsigned long flags;

	spin_lock_irqsave(&sess->ts_spinlock, flags);
	list_for_each_entry(tmp, &sess->timestamps, list) {
		if (tmp->ts == ts) {
			list_del(&tmp->list);
			kfree(tmp);
			goto unlock;
		}
	}
	dev_warn(sess->core->dev_dec,
		 "Couldn't remove buffer with timestamp %llu from list\n", ts);

unlock:
	spin_unlock_irqrestore(&sess->ts_spinlock, flags);
}
EXPORT_SYMBOL_GPL(amvdec_remove_ts);

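/*
 * Return a decoded capture buffer to userspace: set the plane payloads for
 * the current pixel format, copy timestamp/timecode/flags, and flag the
 * buffer as LAST when draining (EOS) or resuming after a source change.
 */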
static void dst_buf_done(struct amvdec_session *sess,
			 struct vb2_v4l2_buffer *vbuf,
			 u32 field, u64 timestamp,
			 struct v4l2_timecode timecode, u32 flags)
{
	struct device *dev = sess->core->dev_dec;
	u32 output_size = amvdec_get_output_size(sess);

	switch (sess->pixfmt_cap) {
	case V4L2_PIX_FMT_NV12M:
		vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
		vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 2);
		break;
	case V4L2_PIX_FMT_YUV420M:
		vb2_set_plane_payload(&vbuf->vb2_buf, 0, output_size);
		vb2_set_plane_payload(&vbuf->vb2_buf, 1, output_size / 4);
		vb2_set_plane_payload(&vbuf->vb2_buf, 2, output_size / 4);
		break;
	}

	vbuf->vb2_buf.timestamp = timestamp;
	vbuf->sequence = sess->sequence_cap++;
	vbuf->flags = flags;
	vbuf->timecode = timecode;

	if (sess->should_stop &&
	    atomic_read(&sess->esparser_queued_bufs) <= 1) {
		const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };

		dev_dbg(dev, "Signaling EOS, sequence_cap = %u\n",
			sess->sequence_cap - 1);
		v4l2_event_queue_fh(&sess->fh, &ev);
		vbuf->flags |= V4L2_BUF_FLAG_LAST;
	} else if (sess->status == STATUS_NEEDS_RESUME) {
		/* Mark LAST for drained show frames during a source change */
		vbuf->flags |= V4L2_BUF_FLAG_LAST;
		sess->sequence_cap = 0;
	} else if (sess->should_stop) {
		dev_dbg(dev, "should_stop, %u bufs remain\n",
			atomic_read(&sess->esparser_queued_bufs));
	}

	dev_dbg(dev, "Buffer %u done, ts = %llu, flags = %08X\n",
		vbuf->vb2_buf.index, timestamp, flags);
	vbuf->field = field;
	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);

	/* Buffer done probably means the vififo got freed */
	schedule_work(&sess->esparser_queue_work);
}

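/* Complete a capture buffer using the oldest queued timestamp (FIFO order) */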
void amvdec_dst_buf_done(struct amvdec_session *sess,
			 struct vb2_v4l2_buffer *vbuf, u32 field)
{
	struct device *dev = sess->core->dev_dec;
	struct amvdec_timestamp *tmp;
	struct list_head *timestamps = &sess->timestamps;
	struct v4l2_timecode timecode;
	u64 timestamp;
	u32 vbuf_flags;
	unsigned long flags;

	spin_lock_irqsave(&sess->ts_spinlock, flags);
	if (list_empty(timestamps)) {
		dev_err(dev, "Buffer %u done but list is empty\n",
			vbuf->vb2_buf.index);

		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&sess->ts_spinlock, flags);
		return;
	}

	tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
	timestamp = tmp->ts;
	timecode = tmp->tc;
	vbuf_flags = tmp->flags;
	list_del(&tmp->list);
	kfree(tmp);
	spin_unlock_irqrestore(&sess->ts_spinlock, flags);

	dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
	atomic_dec(&sess->esparser_queued_bufs);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);

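/*
 * Complete a capture buffer by matching its vififo offset against the
 * queued timestamps; records that stay unmatched for 32 checks are dropped.
 */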
void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
				struct vb2_v4l2_buffer *vbuf,
				u32 offset, u32 field, bool allow_drop)
{
	struct device *dev = sess->core->dev_dec;
	struct amvdec_timestamp *match = NULL;
	struct amvdec_timestamp *tmp, *n;
	struct v4l2_timecode timecode = { 0 };
	u64 timestamp = 0;
	u32 vbuf_flags = 0;
	unsigned long flags;

	spin_lock_irqsave(&sess->ts_spinlock, flags);

	/* Look for our vififo offset to get the corresponding timestamp. */
	list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
		if (tmp->offset > offset) {
			/*
			 * Delete any record that remained unused for 32 match
			 * checks
			 */
			if (tmp->used_count++ >= 32) {
				list_del(&tmp->list);
				kfree(tmp);
			}
			break;
		}

		match = tmp;
	}

	if (!match) {
		dev_err(dev, "Buffer %u done but can't match offset (%08X)\n",
			vbuf->vb2_buf.index, offset);
	} else {
		timestamp = match->ts;
		timecode = match->tc;
		vbuf_flags = match->flags;
		list_del(&match->list);
		kfree(match);
	}
	spin_unlock_irqrestore(&sess->ts_spinlock, flags);

	dst_buf_done(sess, vbuf, field, timestamp, timecode, vbuf_flags);
	if (match)
		atomic_dec(&sess->esparser_queued_bufs);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_offset);

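/*
 * Complete the capture buffer referenced by the firmware's buffer index,
 * translating it through fw_idx_to_vb2_idx[].
 */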
void amvdec_dst_buf_done_idx(struct amvdec_session *sess,
			     u32 buf_idx, u32 offset, u32 field)
{
	struct vb2_v4l2_buffer *vbuf;
	struct device *dev = sess->core->dev_dec;

	vbuf = v4l2_m2m_dst_buf_remove_by_idx(sess->m2m_ctx,
					      sess->fw_idx_to_vb2_idx[buf_idx]);

	if (!vbuf) {
		dev_err(dev,
			"Buffer %u done but it doesn't exist in m2m_ctx\n",
			buf_idx);
		return;
	}

	if (offset != -1)
		amvdec_dst_buf_done_offset(sess, vbuf, offset, field, true);
	else
		amvdec_dst_buf_done(sess, vbuf, field);
}
EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_idx);

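/*
 * Derive the pixel aspect ratio from the display aspect ratio:
 *   PAR = DAR * (height / width), reduced by the GCD of both terms.
 */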
void amvdec_set_par_from_dar(struct amvdec_session *sess,
			     u32 dar_num, u32 dar_den)
{
	u32 div;

	sess->pixelaspect.numerator = sess->height * dar_num;
	sess->pixelaspect.denominator = sess->width * dar_den;
	div = gcd(sess->pixelaspect.numerator, sess->pixelaspect.denominator);
	sess->pixelaspect.numerator /= div;
	sess->pixelaspect.denominator /= div;
}
EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);

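/*
 * Handle a coded resolution / DPB size change: update the minimum capture
 * buffer count and either resume decoding (if the capture queue already
 * fits) or signal a V4L2_EVENT_SOURCE_CHANGE to userspace.
 */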
void amvdec_src_change(struct amvdec_session *sess, u32 width,
		       u32 height, u32 dpb_size)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };

	v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);

	/*
	 * Check if the capture queue is already configured well for our
	 * usecase. If so, keep decoding with it and do not send the event
	 */
	if (sess->streamon_cap &&
	    sess->width == width &&
	    sess->height == height &&
	    dpb_size <= sess->num_dst_bufs) {
		sess->fmt_out->codec_ops->resume(sess);
		return;
	}

	sess->changed_format = 0;
	sess->width = width;
	sess->height = height;
	sess->status = STATUS_NEEDS_RESUME;

	dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
		width, height, dpb_size);
	v4l2_event_queue_fh(&sess->fh, &ev);
}
EXPORT_SYMBOL_GPL(amvdec_src_change);

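/* Fatal error: put both vb2 queues in the error state */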
void amvdec_abort(struct amvdec_session *sess)
{
	dev_info(sess->core->dev, "Aborting decoding session!\n");
	vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
	vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
}
EXPORT_SYMBOL_GPL(amvdec_abort);