xref: /linux/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Wave5 series multi-standard codec IP - helper functions
4  *
5  * Copyright (C) 2021-2023 CHIPS&MEDIA INC
6  */
7 
8 #include <linux/bug.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/delay.h>
11 #include "wave5-vpuapi.h"
12 #include "wave5-regdefine.h"
13 #include "wave5.h"
14 
15 #define DECODE_ALL_TEMPORAL_LAYERS 0
16 #define DECODE_ALL_SPATIAL_LAYERS 0
17 
18 static int wave5_initialize_vpu(struct device *dev, u8 *code, size_t size)
19 {
20 	int ret;
21 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
22 
23 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
24 	if (ret)
25 		return ret;
26 
27 	if (wave5_vpu_is_init(vpu_dev)) {
28 		wave5_vpu_re_init(dev, (void *)code, size);
29 		ret = -EBUSY;
30 		goto err_out;
31 	}
32 
33 	ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
34 	if (ret)
35 		goto err_out;
36 
37 	ret = wave5_vpu_init(dev, (void *)code, size);
38 
39 err_out:
40 	mutex_unlock(&vpu_dev->hw_lock);
41 	return ret;
42 }
43 
44 int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size)
45 {
46 	if (!bitcode || size == 0)
47 		return -EINVAL;
48 
49 	return wave5_initialize_vpu(dev, bitcode, size);
50 }
51 
/*
 * wave5_vpu_flush_instance() - drain an instance's pending work on the VPU
 * @inst: the decoder/encoder instance to flush
 *
 * Issues the firmware FLUSH command under the hardware lock and retries while
 * the firmware reports -EBUSY (VPU still running). Between retries the lock
 * is dropped so pending decode results can be collected and display buffers
 * returned, letting the firmware make progress.
 *
 * Return: 0 on success, a mutex error, the firmware error, or -ETIMEDOUT
 * after MAX_FIRMWARE_CALL_RETRY busy retries.
 */
int wave5_vpu_flush_instance(struct vpu_instance *inst)
{
	int ret = 0;
	int mutex_ret = 0;
	int retry = 0;

	mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
	if (mutex_ret)
		return mutex_ret;
	do {
		/*
		 * Repeat the FLUSH command until the firmware reports that the
		 * VPU isn't running anymore
		 */
		ret = wave5_vpu_hw_flush_instance(inst);
		if (ret < 0 && ret != -EBUSY) {
			/* Hard failure (not "still running"): give up. */
			dev_warn(inst->dev->dev, "Flush of %s instance with id: %d fail: %d\n",
				 inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id,
				 ret);
			mutex_unlock(&inst->dev->hw_lock);
			return ret;
		}
		if (ret == -EBUSY && retry++ >= MAX_FIRMWARE_CALL_RETRY) {
			dev_warn(inst->dev->dev, "Flush of %s instance with id: %d timed out!\n",
				 inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
			mutex_unlock(&inst->dev->hw_lock);
			return -ETIMEDOUT;
		} else if (ret == -EBUSY) {
			struct dec_output_info dec_info;

			/*
			 * Still busy: consume one pending decode result so the
			 * firmware can finish. wave5_vpu_dec_get_output_info()
			 * and wave5_vpu_dec_set_disp_flag() take hw_lock
			 * themselves, hence the unlock/relock around each call.
			 */
			mutex_unlock(&inst->dev->hw_lock);
			wave5_vpu_dec_get_output_info(inst, &dec_info);
			mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
			if (mutex_ret)
				return mutex_ret;
			if (dec_info.index_frame_display >= 0) {
				/* Hand the displayed buffer back to the firmware. */
				mutex_unlock(&inst->dev->hw_lock);
				wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
				mutex_ret = mutex_lock_interruptible(&inst->dev->hw_lock);
				if (mutex_ret)
					return mutex_ret;
			}
		}
	} while (ret != 0);
	mutex_unlock(&inst->dev->hw_lock);

	return ret;
}
100 
101 int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id)
102 {
103 	int ret;
104 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
105 
106 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
107 	if (ret)
108 		return ret;
109 
110 	if (!wave5_vpu_is_init(vpu_dev)) {
111 		ret = -EINVAL;
112 		goto err_out;
113 	}
114 
115 	if (product_id)
116 		*product_id = vpu_dev->product;
117 	ret = wave5_vpu_get_version(vpu_dev, revision);
118 
119 err_out:
120 	mutex_unlock(&vpu_dev->hw_lock);
121 	return ret;
122 }
123 
124 static int wave5_check_dec_open_param(struct vpu_instance *inst, struct dec_open_param *param)
125 {
126 	if (inst->id >= MAX_NUM_INSTANCE) {
127 		dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
128 			inst->id, MAX_NUM_INSTANCE);
129 		return -EOPNOTSUPP;
130 	}
131 
132 	if (param->bitstream_buffer % 8) {
133 		dev_err(inst->dev->dev,
134 			"Bitstream buffer must be aligned to a multiple of 8\n");
135 		return -EINVAL;
136 	}
137 
138 	if (param->bitstream_buffer_size % 1024 ||
139 	    param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE) {
140 		dev_err(inst->dev->dev,
141 			"Bitstream buffer size must be aligned to a multiple of 1024 and have a minimum size of %d\n",
142 			MIN_BITSTREAM_BUFFER_SIZE);
143 		return -EINVAL;
144 	}
145 
146 	return 0;
147 }
148 
149 int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param)
150 {
151 	struct dec_info *p_dec_info;
152 	int ret;
153 	struct vpu_device *vpu_dev = inst->dev;
154 	dma_addr_t buffer_addr;
155 	size_t buffer_size;
156 
157 	ret = wave5_check_dec_open_param(inst, open_param);
158 	if (ret)
159 		return ret;
160 
161 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
162 	if (ret)
163 		return ret;
164 
165 	if (!wave5_vpu_is_init(vpu_dev)) {
166 		mutex_unlock(&vpu_dev->hw_lock);
167 		return -ENODEV;
168 	}
169 
170 	p_dec_info = &inst->codec_info->dec_info;
171 	memcpy(&p_dec_info->open_param, open_param, sizeof(struct dec_open_param));
172 
173 	buffer_addr = open_param->bitstream_buffer;
174 	buffer_size = open_param->bitstream_buffer_size;
175 	p_dec_info->stream_wr_ptr = buffer_addr;
176 	p_dec_info->stream_rd_ptr = buffer_addr;
177 	p_dec_info->stream_buf_start_addr = buffer_addr;
178 	p_dec_info->stream_buf_size = buffer_size;
179 	p_dec_info->stream_buf_end_addr = buffer_addr + buffer_size;
180 	p_dec_info->reorder_enable = TRUE;
181 	p_dec_info->temp_id_select_mode = TEMPORAL_ID_MODE_ABSOLUTE;
182 	p_dec_info->target_temp_id = DECODE_ALL_TEMPORAL_LAYERS;
183 	p_dec_info->target_spatial_id = DECODE_ALL_SPATIAL_LAYERS;
184 
185 	ret = wave5_vpu_build_up_dec_param(inst, open_param);
186 	mutex_unlock(&vpu_dev->hw_lock);
187 
188 	return ret;
189 }
190 
191 static int reset_auxiliary_buffers(struct vpu_instance *inst, unsigned int index)
192 {
193 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
194 
195 	if (index >= MAX_REG_FRAME)
196 		return 1;
197 
198 	if (p_dec_info->vb_mv[index].size == 0 && p_dec_info->vb_fbc_y_tbl[index].size == 0 &&
199 	    p_dec_info->vb_fbc_c_tbl[index].size == 0)
200 		return 1;
201 
202 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[index]);
203 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[index]);
204 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[index]);
205 
206 	return 0;
207 }
208 
/*
 * wave5_vpu_dec_close() - tear down a decoder instance
 * @inst: the decoder instance to close
 * @fail_res: output, firmware failure reason (WAVE5_SYSERR_*), 0 on success
 *
 * Sends DEC_FINISH_SEQ to the firmware, retrying while it reports the VPU is
 * still running, then frees the work, auxiliary and task DMA buffers owned by
 * the instance. The device is runtime-resumed for the duration of the call.
 *
 * Return: 0 on success or a negative error code.
 */
int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
{
	/*
	 * NOTE(review): this computes a member address before the
	 * !inst->codec_info check below — confirm codec_info can never be
	 * NULL here, or move the assignment after the check.
	 */
	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
	int ret;
	int retry = 0;
	struct vpu_device *vpu_dev = inst->dev;
	int i;
	struct dec_output_info dec_info;
	int ret_mutex;

	*fail_res = 0;
	if (!inst->codec_info)
		return -EINVAL;

	/* Keep the device powered while talking to the firmware. */
	pm_runtime_resume_and_get(inst->dev->dev);

	ret_mutex = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret_mutex) {
		pm_runtime_put_sync(inst->dev->dev);
		return ret_mutex;
	}

	do {
		ret = wave5_vpu_dec_finish_seq(inst, fail_res);
		if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
			/* Hard failure that is not "still running": give up. */
			dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
			goto unlock_and_return;
		}

		if (ret == 0)
			break;

		if (*fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
			dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
			goto unlock_and_return;
		}

		if (retry++ >= MAX_FIRMWARE_CALL_RETRY) {
			ret = -ETIMEDOUT;
			goto unlock_and_return;
		}

		/*
		 * VPU still running: drain one pending output so the firmware
		 * can finish. wave5_vpu_dec_get_output_info() takes hw_lock
		 * itself, hence the unlock/relock around it.
		 */
		mutex_unlock(&vpu_dev->hw_lock);
		wave5_vpu_dec_get_output_info(inst, &dec_info);
		ret_mutex = mutex_lock_interruptible(&vpu_dev->hw_lock);
		if (ret_mutex) {
			pm_runtime_put_sync(inst->dev->dev);
			return ret_mutex;
		}
	} while (ret != 0);

	dev_dbg(inst->dev->dev, "%s: dec_finish_seq complete\n", __func__);

	wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);

	for (i = 0 ; i < MAX_REG_FRAME; i++) {
		/* Helper returns non-zero at the first unused slot: done. */
		ret = reset_auxiliary_buffers(inst, i);
		if (ret) {
			ret = 0;
			break;
		}
	}

	wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_task);

	mutex_destroy(&inst->feed_lock);

unlock_and_return:
	mutex_unlock(&vpu_dev->hw_lock);
	pm_runtime_put_sync(inst->dev->dev);
	return ret;
}
281 
282 int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst)
283 {
284 	int ret;
285 	struct vpu_device *vpu_dev = inst->dev;
286 
287 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
288 	if (ret)
289 		return ret;
290 
291 	ret = wave5_vpu_dec_init_seq(inst);
292 
293 	mutex_unlock(&vpu_dev->hw_lock);
294 
295 	return ret;
296 }
297 
298 int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info)
299 {
300 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
301 	int ret;
302 	struct vpu_device *vpu_dev = inst->dev;
303 
304 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
305 	if (ret)
306 		return ret;
307 
308 	ret = wave5_vpu_dec_get_seq_info(inst, info);
309 	if (!ret)
310 		p_dec_info->initial_info_obtained = true;
311 
312 	info->rd_ptr = wave5_dec_get_rd_ptr(inst);
313 	info->wr_ptr = p_dec_info->stream_wr_ptr;
314 
315 	p_dec_info->initial_info = *info;
316 
317 	mutex_unlock(&vpu_dev->hw_lock);
318 
319 	return ret;
320 }
321 
322 int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
323 					   int num_of_display_fbs, int stride, int height)
324 {
325 	struct dec_info *p_dec_info;
326 	int ret;
327 	struct vpu_device *vpu_dev = inst->dev;
328 	struct frame_buffer *fb;
329 
330 	if (num_of_decoding_fbs >= WAVE5_MAX_FBS || num_of_display_fbs >= WAVE5_MAX_FBS)
331 		return -EINVAL;
332 
333 	p_dec_info = &inst->codec_info->dec_info;
334 	p_dec_info->num_of_decoding_fbs = num_of_decoding_fbs;
335 	p_dec_info->num_of_display_fbs = num_of_display_fbs;
336 	p_dec_info->stride = stride;
337 
338 	if (!p_dec_info->initial_info_obtained)
339 		return -EINVAL;
340 
341 	if (stride < p_dec_info->initial_info.pic_width || (stride % 8 != 0) ||
342 	    height < p_dec_info->initial_info.pic_height)
343 		return -EINVAL;
344 
345 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
346 	if (ret)
347 		return ret;
348 
349 	fb = inst->frame_buf;
350 	ret = wave5_vpu_dec_register_framebuffer(inst, &fb[p_dec_info->num_of_decoding_fbs],
351 						 LINEAR_FRAME_MAP, p_dec_info->num_of_display_fbs);
352 	if (ret)
353 		goto err_out;
354 
355 	ret = wave5_vpu_dec_register_framebuffer(inst, &fb[0], COMPRESSED_FRAME_MAP,
356 						 p_dec_info->num_of_decoding_fbs);
357 
358 err_out:
359 	mutex_unlock(&vpu_dev->hw_lock);
360 
361 	return ret;
362 }
363 
/*
 * wave5_vpu_dec_get_bitstream_buffer() - query the bitstream ring state
 * @inst: decoder instance
 * @prd_ptr: optional output, current hardware read pointer
 * @pwr_ptr: optional output, current software write pointer
 * @size: optional output, free space (bytes) available for writing
 *
 * Return: 0 on success, or the error from taking the hardware lock.
 */
int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
				       dma_addr_t *pwr_ptr, size_t *size)
{
	struct dec_info *p_dec_info;
	dma_addr_t rd_ptr;
	dma_addr_t wr_ptr;
	int room;
	struct vpu_device *vpu_dev = inst->dev;
	int ret;

	p_dec_info = &inst->codec_info->dec_info;

	/* Only reading the hardware read pointer needs the hardware lock. */
	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;
	rd_ptr = wave5_dec_get_rd_ptr(inst);
	mutex_unlock(&vpu_dev->hw_lock);

	wr_ptr = p_dec_info->stream_wr_ptr;

	/*
	 * Free space in the ring: the straight gap when the writer trails the
	 * reader, otherwise the two segments around the wrap point.
	 */
	if (wr_ptr < rd_ptr)
		room = rd_ptr - wr_ptr;
	else
		room = (p_dec_info->stream_buf_end_addr - wr_ptr) +
			(rd_ptr - p_dec_info->stream_buf_start_addr);
	/* One byte is kept unused, presumably to tell "full" from "empty". */
	room--;

	if (prd_ptr)
		*prd_ptr = rd_ptr;
	if (pwr_ptr)
		*pwr_ptr = wr_ptr;
	if (size)
		*size = room;

	return 0;
}
400 
/*
 * wave5_vpu_dec_update_bitstream_buffer() - advance the write pointer
 * @inst: decoder instance
 * @size: number of bytes just written into the ring buffer
 *
 * Moves the software write pointer forward by @size, wrapping around the end
 * of the ring buffer, and then informs the firmware of the stream state.
 *
 * Return: 0 on success, -EINVAL if @size would overrun the read pointer or
 * the instance has no codec info, or a mutex error.
 */
int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size)
{
	struct dec_info *p_dec_info;
	dma_addr_t wr_ptr;
	dma_addr_t rd_ptr;
	int ret;
	struct vpu_device *vpu_dev = inst->dev;

	if (!inst->codec_info)
		return -EINVAL;

	p_dec_info = &inst->codec_info->dec_info;
	wr_ptr = p_dec_info->stream_wr_ptr;
	rd_ptr = p_dec_info->stream_rd_ptr;

	if (size > 0) {
		/* The new data must not overtake the (unconsumed) read pointer. */
		if (wr_ptr < rd_ptr && rd_ptr <= wr_ptr + size)
			return -EINVAL;

		wr_ptr += size;

		/* Wrap the write pointer around the end of the ring buffer. */
		if (wr_ptr > p_dec_info->stream_buf_end_addr) {
			u32 room = wr_ptr - p_dec_info->stream_buf_end_addr;

			wr_ptr = p_dec_info->stream_buf_start_addr;
			wr_ptr += room;
		} else if (wr_ptr == p_dec_info->stream_buf_end_addr) {
			wr_ptr = p_dec_info->stream_buf_start_addr;
		}

		p_dec_info->stream_wr_ptr = wr_ptr;
		p_dec_info->stream_rd_ptr = rd_ptr;
	}

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;
	/* size == 0 raises the flag — presumably end-of-stream; confirm. */
	ret = wave5_vpu_dec_set_bitstream_flag(inst, (size == 0));
	mutex_unlock(&vpu_dev->hw_lock);

	return ret;
}
443 
444 int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, u32 *res_fail)
445 {
446 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
447 	int ret;
448 	struct vpu_device *vpu_dev = inst->dev;
449 
450 	if (p_dec_info->stride == 0) /* this means frame buffers have not been registered. */
451 		return -EINVAL;
452 
453 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
454 	if (ret)
455 		return ret;
456 
457 	ret = wave5_vpu_decode(inst, res_fail);
458 
459 	mutex_unlock(&vpu_dev->hw_lock);
460 
461 	return ret;
462 }
463 
464 int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr)
465 {
466 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
467 	int ret;
468 	struct vpu_device *vpu_dev = inst->dev;
469 
470 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
471 	if (ret)
472 		return ret;
473 
474 	ret = wave5_dec_set_rd_ptr(inst, addr);
475 
476 	p_dec_info->stream_rd_ptr = addr;
477 	if (update_wr_ptr)
478 		p_dec_info->stream_wr_ptr = addr;
479 
480 	mutex_unlock(&vpu_dev->hw_lock);
481 
482 	return ret;
483 }
484 
485 dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst)
486 {
487 	int ret;
488 	dma_addr_t rd_ptr = 0;
489 
490 	ret = mutex_lock_interruptible(&inst->dev->hw_lock);
491 	if (ret)
492 		return rd_ptr;
493 
494 	rd_ptr = wave5_dec_get_rd_ptr(inst);
495 
496 	mutex_unlock(&inst->dev->hw_lock);
497 
498 	return rd_ptr;
499 }
500 
/*
 * wave5_vpu_dec_get_output_info() - collect the result of a decode command
 * @inst: decoder instance
 * @info: output, fully populated decode result
 *
 * Reads the firmware's decode result, derives the decoded/display crop
 * rectangles and picture sizes, updates the cached per-frame output info and
 * the stream read pointer, and latches new sequence parameters when the
 * firmware signals a sequence change.
 *
 * Return: 0 on success; on failure @info still carries the stream pointers.
 */
int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info)
{
	struct dec_info *p_dec_info;
	int ret;
	struct vpu_rect rect_info;
	u32 val;
	u32 decoded_index;
	u32 disp_idx;
	u32 max_dec_index;
	struct vpu_device *vpu_dev = inst->dev;
	struct dec_output_info *disp_info;

	if (WARN_ON(!info))
		return -EINVAL;

	p_dec_info = &inst->codec_info->dec_info;

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;

	memset(info, 0, sizeof(*info));

	ret = wave5_vpu_dec_get_result(inst, info);
	if (ret) {
		/* Query failed: still report the current stream pointers. */
		info->rd_ptr = p_dec_info->stream_rd_ptr;
		info->wr_ptr = p_dec_info->stream_wr_ptr;
		goto err_out;
	}

	decoded_index = info->index_frame_decoded;

	/* calculate display frame region */
	val = 0;
	rect_info.left = 0;
	rect_info.right = 0;
	rect_info.top = 0;
	rect_info.bottom = 0;

	/* A negative index wraps to a huge u32, so this also filters it out. */
	if (decoded_index < WAVE5_MAX_FBS) {
		if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC)
			rect_info = p_dec_info->initial_info.pic_crop_rect;

		if (inst->std == W_HEVC_DEC)
			p_dec_info->dec_out_info[decoded_index].decoded_poc = info->decoded_poc;

		p_dec_info->dec_out_info[decoded_index].rc_decoded = rect_info;
	}
	info->rc_decoded = rect_info;

	disp_idx = info->index_frame_display;
	if (info->index_frame_display >= 0 && info->index_frame_display < WAVE5_MAX_FBS) {
		disp_info = &p_dec_info->dec_out_info[disp_idx];
		if (info->index_frame_display != info->index_frame_decoded) {
			/*
			 * when index_frame_decoded < 0, and index_frame_display >= 0
			 * info->dec_pic_width and info->dec_pic_height are still valid
			 * but those of p_dec_info->dec_out_info[disp_idx] are invalid in VP9
			 */
			info->disp_pic_width = disp_info->dec_pic_width;
			info->disp_pic_height = disp_info->dec_pic_height;
		} else {
			info->disp_pic_width = info->dec_pic_width;
			info->disp_pic_height = info->dec_pic_height;
		}

		info->rc_display = disp_info->rc_decoded;

	} else {
		/* No frame to display this cycle. */
		info->rc_display.left = 0;
		info->rc_display.right = 0;
		info->rc_display.top = 0;
		info->rc_display.bottom = 0;
		info->disp_pic_width = 0;
		info->disp_pic_height = 0;
	}

	p_dec_info->stream_rd_ptr = wave5_dec_get_rd_ptr(inst);
	p_dec_info->frame_display_flag = vpu_read_reg(vpu_dev, W5_RET_DEC_DISP_IDC);

	val = p_dec_info->num_of_decoding_fbs; //fb_offset

	max_dec_index = (p_dec_info->num_of_decoding_fbs > p_dec_info->num_of_display_fbs) ?
		p_dec_info->num_of_decoding_fbs : p_dec_info->num_of_display_fbs;

	/* Display buffers are stored after the decoding ones (see val above). */
	if (info->index_frame_display >= 0 &&
	    info->index_frame_display < (int)max_dec_index)
		info->disp_frame = inst->frame_buf[val + info->index_frame_display];

	info->rd_ptr = p_dec_info->stream_rd_ptr;
	info->wr_ptr = p_dec_info->stream_wr_ptr;
	info->frame_display_flag = p_dec_info->frame_display_flag;

	info->sequence_no = p_dec_info->initial_info.sequence_no;
	if (decoded_index < WAVE5_MAX_FBS)
		p_dec_info->dec_out_info[decoded_index] = *info;

	if (disp_idx < WAVE5_MAX_FBS)
		info->disp_frame.sequence_no = info->sequence_no;

	if (info->sequence_changed) {
		/* New sequence: latch its parameters and bump the sequence id. */
		memcpy((void *)&p_dec_info->initial_info, (void *)&p_dec_info->new_seq_info,
		       sizeof(struct dec_initial_info));
		p_dec_info->initial_info.sequence_no++;
	}

err_out:
	mutex_unlock(&vpu_dev->hw_lock);

	return ret;
}
612 
613 int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index)
614 {
615 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
616 	int ret;
617 	struct vpu_device *vpu_dev = inst->dev;
618 
619 	if (index >= p_dec_info->num_of_display_fbs)
620 		return -EINVAL;
621 
622 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
623 	if (ret)
624 		return ret;
625 	ret = wave5_dec_clr_disp_flag(inst, index);
626 	mutex_unlock(&vpu_dev->hw_lock);
627 
628 	return ret;
629 }
630 
631 int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index)
632 {
633 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
634 	int ret = 0;
635 	struct vpu_device *vpu_dev = inst->dev;
636 
637 	if (index >= p_dec_info->num_of_display_fbs)
638 		return -EINVAL;
639 
640 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
641 	if (ret)
642 		return ret;
643 	ret = wave5_dec_set_disp_flag(inst, index);
644 	mutex_unlock(&vpu_dev->hw_lock);
645 
646 	return ret;
647 }
648 
649 int wave5_vpu_dec_reset_framebuffer(struct vpu_instance *inst, unsigned int index)
650 {
651 	if (index >= MAX_REG_FRAME)
652 		return -EINVAL;
653 
654 	if (inst->frame_vbuf[index].size == 0)
655 		return -EINVAL;
656 
657 	wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[index]);
658 
659 	return 0;
660 }
661 
/*
 * wave5_vpu_dec_give_command() - miscellaneous decoder control operations
 * @inst: decoder instance
 * @cmd: the codec_command to execute
 * @parameter: command-dependent in/out argument
 *
 * Return: 0 on success, -EINVAL for an unknown command (but see the note in
 * DEC_RESET_FRAMEBUF_INFO below).
 */
int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
{
	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
	int ret = 0;

	switch (cmd) {
	case DEC_GET_QUEUE_STATUS: {
		/* parameter is an out struct queue_status_info. */
		struct queue_status_info *queue_info = parameter;

		queue_info->instance_queue_count = p_dec_info->instance_queue_count;
		queue_info->report_queue_count = p_dec_info->report_queue_count;
		break;
	}
	case DEC_RESET_FRAMEBUF_INFO: {
		int i;

		/* Free every registered framebuffer... */
		for (i = 0; i < MAX_REG_FRAME; i++) {
			ret = wave5_vpu_dec_reset_framebuffer(inst, i);
			if (ret)
				break;
		}

		/* ...and the per-slot auxiliary (MV/FBC) buffers. */
		for (i = 0; i < MAX_REG_FRAME; i++) {
			ret = reset_auxiliary_buffers(inst, i);
			if (ret)
				break;
		}

		/*
		 * NOTE(review): both helpers return non-zero on the first
		 * empty slot and that sentinel (-EINVAL or 1) propagates as
		 * this command's result — confirm callers ignore it, or zero
		 * ret here like wave5_vpu_dec_close() does.
		 */
		wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
		break;
	}
	case DEC_GET_SEQ_INFO: {
		/* parameter is an out struct dec_initial_info. */
		struct dec_initial_info *seq_info = parameter;

		*seq_info = p_dec_info->initial_info;
		break;
	}

	default:
		return -EINVAL;
	}

	return ret;
}
706 
707 int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param)
708 {
709 	struct enc_info *p_enc_info;
710 	int ret;
711 	struct vpu_device *vpu_dev = inst->dev;
712 
713 	ret = wave5_vpu_enc_check_open_param(inst, open_param);
714 	if (ret)
715 		return ret;
716 
717 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
718 	if (ret)
719 		return ret;
720 
721 	if (!wave5_vpu_is_init(vpu_dev)) {
722 		mutex_unlock(&vpu_dev->hw_lock);
723 		return -ENODEV;
724 	}
725 
726 	p_enc_info = &inst->codec_info->enc_info;
727 	p_enc_info->open_param = *open_param;
728 
729 	ret = wave5_vpu_build_up_enc_param(vpu_dev->dev, inst, open_param);
730 	mutex_unlock(&vpu_dev->hw_lock);
731 
732 	return ret;
733 }
734 
735 int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res)
736 {
737 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
738 	int ret;
739 	int retry = 0;
740 	struct vpu_device *vpu_dev = inst->dev;
741 
742 	*fail_res = 0;
743 	if (!inst->codec_info)
744 		return -EINVAL;
745 
746 	pm_runtime_resume_and_get(inst->dev->dev);
747 
748 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
749 	if (ret) {
750 		pm_runtime_resume_and_get(inst->dev->dev);
751 		return ret;
752 	}
753 
754 	do {
755 		ret = wave5_vpu_enc_finish_seq(inst, fail_res);
756 		if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
757 			dev_warn(inst->dev->dev, "enc_finish_seq timed out\n");
758 			pm_runtime_resume_and_get(inst->dev->dev);
759 			mutex_unlock(&vpu_dev->hw_lock);
760 			return ret;
761 		}
762 
763 		if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
764 		    retry++ >= MAX_FIRMWARE_CALL_RETRY) {
765 			pm_runtime_resume_and_get(inst->dev->dev);
766 			mutex_unlock(&vpu_dev->hw_lock);
767 			return -ETIMEDOUT;
768 		}
769 	} while (ret != 0);
770 
771 	dev_dbg(inst->dev->dev, "%s: enc_finish_seq complete\n", __func__);
772 
773 	wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work);
774 
775 	if (inst->std == W_HEVC_ENC || inst->std == W_AVC_ENC) {
776 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_sub_sam_buf);
777 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_mv);
778 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_y_tbl);
779 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_c_tbl);
780 	}
781 
782 	wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_task);
783 	mutex_unlock(&vpu_dev->hw_lock);
784 	pm_runtime_put_sync(inst->dev->dev);
785 
786 	return 0;
787 }
788 
789 int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
790 					unsigned int stride, int height,
791 					enum tiled_map_type map_type)
792 {
793 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
794 	int ret;
795 	struct vpu_device *vpu_dev = inst->dev;
796 	unsigned int size_luma, size_chroma;
797 	int i;
798 
799 	if (p_enc_info->stride)
800 		return -EINVAL;
801 
802 	if (!p_enc_info->initial_info_obtained)
803 		return -EINVAL;
804 
805 	if (num < p_enc_info->initial_info.min_frame_buffer_count)
806 		return -EINVAL;
807 
808 	if (stride == 0 || stride % 8 != 0)
809 		return -EINVAL;
810 
811 	if (height <= 0)
812 		return -EINVAL;
813 
814 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
815 	if (ret)
816 		return ret;
817 
818 	p_enc_info->num_frame_buffers = num;
819 	p_enc_info->stride = stride;
820 
821 	size_luma = stride * height;
822 	size_chroma = ALIGN(stride / 2, 16) * height;
823 
824 	for (i = 0; i < num; i++) {
825 		if (!inst->frame_buf[i].update_fb_info)
826 			continue;
827 
828 		inst->frame_buf[i].update_fb_info = false;
829 		inst->frame_buf[i].stride = stride;
830 		inst->frame_buf[i].height = height;
831 		inst->frame_buf[i].map_type = COMPRESSED_FRAME_MAP;
832 		inst->frame_buf[i].buf_y_size = size_luma;
833 		inst->frame_buf[i].buf_cb = inst->frame_buf[i].buf_y + size_luma;
834 		inst->frame_buf[i].buf_cb_size = size_chroma;
835 		inst->frame_buf[i].buf_cr_size = 0;
836 	}
837 
838 	ret = wave5_vpu_enc_register_framebuffer(inst->dev->dev, inst, &inst->frame_buf[0],
839 						 COMPRESSED_FRAME_MAP,
840 						 p_enc_info->num_frame_buffers);
841 
842 	mutex_unlock(&vpu_dev->hw_lock);
843 
844 	return ret;
845 }
846 
847 static int wave5_check_enc_param(struct vpu_instance *inst, struct enc_param *param)
848 {
849 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
850 
851 	if (!param)
852 		return -EINVAL;
853 
854 	if (!param->source_frame)
855 		return -EINVAL;
856 
857 	if (p_enc_info->open_param.bit_rate == 0 && inst->std == W_HEVC_ENC) {
858 		if (param->pic_stream_buffer_addr % 16 || param->pic_stream_buffer_size == 0)
859 			return -EINVAL;
860 	}
861 	if (param->pic_stream_buffer_addr % 8 || param->pic_stream_buffer_size == 0)
862 		return -EINVAL;
863 
864 	return 0;
865 }
866 
867 int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param, u32 *fail_res)
868 {
869 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
870 	int ret;
871 	struct vpu_device *vpu_dev = inst->dev;
872 
873 	*fail_res = 0;
874 
875 	if (p_enc_info->stride == 0) /* this means frame buffers have not been registered. */
876 		return -EINVAL;
877 
878 	ret = wave5_check_enc_param(inst, param);
879 	if (ret)
880 		return ret;
881 
882 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
883 	if (ret)
884 		return ret;
885 
886 	p_enc_info->pts_map[param->src_idx] = param->pts;
887 
888 	ret = wave5_vpu_encode(inst, param, fail_res);
889 
890 	mutex_unlock(&vpu_dev->hw_lock);
891 
892 	return ret;
893 }
894 
895 int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info)
896 {
897 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
898 	int ret;
899 	struct vpu_device *vpu_dev = inst->dev;
900 
901 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
902 	if (ret)
903 		return ret;
904 
905 	ret = wave5_vpu_enc_get_result(inst, info);
906 	if (ret) {
907 		info->pts = 0;
908 		goto unlock;
909 	}
910 
911 	if (info->recon_frame_index >= 0)
912 		info->pts = p_enc_info->pts_map[info->enc_src_idx];
913 
914 unlock:
915 	mutex_unlock(&vpu_dev->hw_lock);
916 
917 	return ret;
918 }
919 
920 int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
921 {
922 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
923 
924 	switch (cmd) {
925 	case ENABLE_ROTATION:
926 		p_enc_info->rotation_enable = true;
927 		break;
928 	case ENABLE_MIRRORING:
929 		p_enc_info->mirror_enable = true;
930 		break;
931 	case SET_MIRROR_DIRECTION: {
932 		enum mirror_direction mir_dir;
933 
934 		mir_dir = *(enum mirror_direction *)parameter;
935 		if (mir_dir != MIRDIR_NONE && mir_dir != MIRDIR_HOR &&
936 		    mir_dir != MIRDIR_VER && mir_dir != MIRDIR_HOR_VER)
937 			return -EINVAL;
938 		p_enc_info->mirror_direction = mir_dir;
939 		break;
940 	}
941 	case SET_ROTATION_ANGLE: {
942 		int angle;
943 
944 		angle = *(int *)parameter;
945 		if (angle && angle != 90 && angle != 180 && angle != 270)
946 			return -EINVAL;
947 		if (p_enc_info->initial_info_obtained && (angle == 90 || angle == 270))
948 			return -EINVAL;
949 		p_enc_info->rotation_angle = angle;
950 		break;
951 	}
952 	case ENC_GET_QUEUE_STATUS: {
953 		struct queue_status_info *queue_info = parameter;
954 
955 		queue_info->instance_queue_count = p_enc_info->instance_queue_count;
956 		queue_info->report_queue_count = p_enc_info->report_queue_count;
957 		break;
958 	}
959 	default:
960 		return -EINVAL;
961 	}
962 	return 0;
963 }
964 
965 int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst)
966 {
967 	int ret;
968 	struct vpu_device *vpu_dev = inst->dev;
969 
970 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
971 	if (ret)
972 		return ret;
973 
974 	ret = wave5_vpu_enc_init_seq(inst);
975 
976 	mutex_unlock(&vpu_dev->hw_lock);
977 
978 	return ret;
979 }
980 
981 int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info)
982 {
983 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
984 	int ret;
985 	struct vpu_device *vpu_dev = inst->dev;
986 
987 	if (!info)
988 		return -EINVAL;
989 
990 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
991 	if (ret)
992 		return ret;
993 
994 	ret = wave5_vpu_enc_get_seq_info(inst, info);
995 	if (ret) {
996 		p_enc_info->initial_info_obtained = false;
997 		mutex_unlock(&vpu_dev->hw_lock);
998 		return ret;
999 	}
1000 
1001 	p_enc_info->initial_info_obtained = true;
1002 	p_enc_info->initial_info = *info;
1003 
1004 	mutex_unlock(&vpu_dev->hw_lock);
1005 
1006 	return 0;
1007 }
1008