xref: /linux/drivers/media/platform/renesas/vsp1/vsp1_vspx.c (revision ec2e0fb07d789976c601bec19ecced7a501c3705)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * vsp1_vspx.c  --  R-Car Gen 4 VSPX
4  *
5  * Copyright (C) 2025 Ideas On Board Oy
6  * Copyright (C) 2025 Renesas Electronics Corporation
7  */
8 
9 #include "vsp1_vspx.h"
10 
11 #include <linux/cleanup.h>
12 #include <linux/container_of.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/export.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 
21 #include <media/media-entity.h>
22 #include <media/v4l2-subdev.h>
23 #include <media/vsp1.h>
24 
25 #include "vsp1_dl.h"
26 #include "vsp1_iif.h"
27 #include "vsp1_pipe.h"
28 #include "vsp1_rwpf.h"
29 
/**
 * struct vsp1_vspx_pipeline - VSPX pipeline
 * @pipe: the VSP1 pipeline
 * @partition: the pre-calculated partition used by the pipeline
 * @mutex: protects the streaming start/stop sequences
 * @lock: protect access to the enabled flag
 * @enabled: the enable flag
 * @vspx_frame_end: frame end callback
 * @frame_end_data: data for the frame end callback
 */
struct vsp1_vspx_pipeline {
	struct vsp1_pipeline pipe;
	struct vsp1_partition partition;

	/*
	 * Protects the streaming start/stop sequences.
	 *
	 * The start/stop sequences cannot be locked with the 'lock' spinlock
	 * as they acquire mutexes when handling the pm runtime and the vsp1
	 * pipe start/stop operations. Provide a dedicated mutex for this
	 * reason.
	 */
	struct mutex mutex;

	/*
	 * Protects the enable flag.
	 *
	 * The enabled flag is contended between the start/stop streaming
	 * routines and the job_run one, which cannot take a mutex as it is
	 * called from the ISP irq context.
	 */
	spinlock_t lock;
	bool enabled;

	/*
	 * Optional completion notifier provided by the ISP driver at
	 * start-streaming time; invoked from the frame end interrupt.
	 */
	void (*vspx_frame_end)(void *frame_end_data);
	void *frame_end_data;
};
67 
/* Upcast a generic vsp1 pipeline to its containing VSPX pipeline. */
static inline struct vsp1_vspx_pipeline *
to_vsp1_vspx_pipeline(struct vsp1_pipeline *pipe)
{
	return container_of(pipe, struct vsp1_vspx_pipeline, pipe);
}
73 
/**
 * struct vsp1_vspx - VSPX device
 * @vsp1: the VSP1 device
 * @pipe: the VSPX pipeline
 */
struct vsp1_vspx {
	struct vsp1_device *vsp1;
	struct vsp1_vspx_pipeline pipe;
};
83 
84 /* Apply the given width, height and fourcc to the RWPF's subdevice */
85 static int vsp1_vspx_rwpf_set_subdev_fmt(struct vsp1_device *vsp1,
86 					 struct vsp1_rwpf *rwpf,
87 					 u32 isp_fourcc,
88 					 unsigned int width,
89 					 unsigned int height)
90 {
91 	struct vsp1_entity *ent = &rwpf->entity;
92 	struct v4l2_subdev_format format = {};
93 	u32 vspx_fourcc;
94 
95 	switch (isp_fourcc) {
96 	case V4L2_PIX_FMT_GREY:
97 		/* 8 bit RAW Bayer image. */
98 		vspx_fourcc = V4L2_PIX_FMT_RGB332;
99 		break;
100 	case V4L2_PIX_FMT_Y10:
101 	case V4L2_PIX_FMT_Y12:
102 	case V4L2_PIX_FMT_Y16:
103 		/* 10, 12 and 16 bit RAW Bayer image. */
104 		vspx_fourcc = V4L2_PIX_FMT_RGB565;
105 		break;
106 	case V4L2_META_FMT_GENERIC_8:
107 		/* ConfigDMA parameters buffer. */
108 		vspx_fourcc = V4L2_PIX_FMT_XBGR32;
109 		break;
110 	default:
111 		return -EINVAL;
112 	}
113 
114 	rwpf->fmtinfo = vsp1_get_format_info(vsp1, vspx_fourcc);
115 
116 	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
117 	format.pad = RWPF_PAD_SINK;
118 	format.format.width = width;
119 	format.format.height = height;
120 	format.format.field = V4L2_FIELD_NONE;
121 	format.format.code = rwpf->fmtinfo->mbus;
122 
123 	return v4l2_subdev_call(&ent->subdev, pad, set_fmt, NULL, &format);
124 }
125 
/*
 * Configure the RPF->IIF->WPF pipeline for ConfigDMA or RAW image transfer.
 *
 * Program RPF0 with the buffer address, geometry and stride of the transfer,
 * select the IIF sink pad (config vs image path), and write the routing,
 * stream and partition configuration into the display list body.
 *
 * Return: 0 on success or a negative error code if applying the format to
 * either RPF0 or the WPF fails.
 */
static int vsp1_vspx_pipeline_configure(struct vsp1_device *vsp1,
					dma_addr_t addr, u32 isp_fourcc,
					unsigned int width, unsigned int height,
					unsigned int stride,
					unsigned int iif_sink_pad,
					struct vsp1_dl_list *dl,
					struct vsp1_dl_body *dlb)
{
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	struct vsp1_rwpf *rpf0 = pipe->inputs[0];
	int ret;

	/* Apply the format to both ends of the pipeline before programming. */
	ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, rpf0, isp_fourcc, width,
					    height);
	if (ret)
		return ret;

	ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, pipe->output, isp_fourcc,
					    width, height);
	if (ret)
		return ret;

	/* Single-partition pipeline: the whole width goes in partition 0. */
	vsp1_pipeline_calculate_partition(pipe, &pipe->part_table[0], width, 0);
	rpf0->format.plane_fmt[0].bytesperline = stride;
	rpf0->format.num_planes = 1;
	rpf0->mem.addr[0] = addr;

	/*
	 * Connect RPF0 to the IIF sink pad corresponding to the config or image
	 * path.
	 */
	rpf0->entity.sink_pad = iif_sink_pad;

	vsp1_entity_route_setup(&rpf0->entity, pipe, dlb);
	vsp1_entity_configure_stream(&rpf0->entity, rpf0->entity.state, pipe,
				     dl, dlb);
	vsp1_entity_configure_partition(&rpf0->entity, pipe,
					&pipe->part_table[0], dl, dlb);

	return 0;
}
169 
170 /* -----------------------------------------------------------------------------
171  * Interrupt handling
172  */
173 
174 static void vsp1_vspx_pipeline_frame_end(struct vsp1_pipeline *pipe,
175 					 unsigned int completion)
176 {
177 	struct vsp1_vspx_pipeline *vspx_pipe = to_vsp1_vspx_pipeline(pipe);
178 
179 	scoped_guard(spinlock_irqsave, &pipe->irqlock) {
180 		/*
181 		 * Operating the vsp1_pipe in singleshot mode requires to
182 		 * manually set the pipeline state to stopped when a transfer
183 		 * is completed.
184 		 */
185 		pipe->state = VSP1_PIPELINE_STOPPED;
186 	}
187 
188 	if (vspx_pipe->vspx_frame_end)
189 		vspx_pipe->vspx_frame_end(vspx_pipe->frame_end_data);
190 }
191 
192 /* -----------------------------------------------------------------------------
193  * ISP Driver API (include/media/vsp1.h)
194  */
195 
196 /**
197  * vsp1_isp_init() - Initialize the VSPX
198  * @dev: The VSP1 struct device
199  *
200  * Return: %0 on success or a negative error code on failure
201  */
202 int vsp1_isp_init(struct device *dev)
203 {
204 	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
205 
206 	if (!vsp1)
207 		return -EPROBE_DEFER;
208 
209 	return 0;
210 }
211 EXPORT_SYMBOL_GPL(vsp1_isp_init);
212 
213 /**
214  * vsp1_isp_get_bus_master - Get VSPX bus master
215  * @dev: The VSP1 struct device
216  *
217  * The VSPX accesses memory through an FCPX instance. When allocating memory
218  * buffers that will have to be accessed by the VSPX the 'struct device' of
219  * the FCPX should be used. Use this function to get a reference to it.
220  *
221  * Return: a pointer to the bus master's device
222  */
223 struct device *vsp1_isp_get_bus_master(struct device *dev)
224 {
225 	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
226 
227 	if (!vsp1)
228 		return ERR_PTR(-ENODEV);
229 
230 	return vsp1->bus_master;
231 }
232 EXPORT_SYMBOL_GPL(vsp1_isp_get_bus_master);
233 
234 /**
235  * vsp1_isp_alloc_buffer - Allocate a buffer in the VSPX address space
236  * @dev: The VSP1 struct device
237  * @size: The size of the buffer to be allocated by the VSPX
238  * @buffer_desc: The buffer descriptor. Will be filled with the buffer
239  *		 CPU-mapped address, the bus address and the size of the
240  *		 allocated buffer
241  *
242  * Allocate a buffer that will be later accessed by the VSPX. Buffers allocated
243  * using vsp1_isp_alloc_buffer() shall be released with a call to
244  * vsp1_isp_free_buffer(). This function is used by the ISP driver to allocate
245  * memory for the ConfigDMA parameters buffer.
246  *
247  * Return: %0 on success or a negative error code on failure
248  */
249 int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
250 			  struct vsp1_isp_buffer_desc *buffer_desc)
251 {
252 	struct device *bus_master = vsp1_isp_get_bus_master(dev);
253 
254 	if (IS_ERR_OR_NULL(bus_master))
255 		return -ENODEV;
256 
257 	buffer_desc->cpu_addr = dma_alloc_coherent(bus_master, size,
258 						   &buffer_desc->dma_addr,
259 						   GFP_KERNEL);
260 	if (!buffer_desc->cpu_addr)
261 		return -ENOMEM;
262 
263 	buffer_desc->size = size;
264 
265 	return 0;
266 }
267 EXPORT_SYMBOL_GPL(vsp1_isp_alloc_buffer);
268 
269 /**
270  * vsp1_isp_free_buffer - Release a buffer allocated by vsp1_isp_alloc_buffer()
271  * @dev: The VSP1 struct device
272  * @buffer_desc: The descriptor of the buffer to release as returned by
273  *		 vsp1_isp_alloc_buffer()
274  *
275  * Release memory in the VSPX address space allocated by
276  * vsp1_isp_alloc_buffer().
277  */
278 void vsp1_isp_free_buffer(struct device *dev,
279 			  struct vsp1_isp_buffer_desc *buffer_desc)
280 {
281 	struct device *bus_master = vsp1_isp_get_bus_master(dev);
282 
283 	if (IS_ERR_OR_NULL(bus_master))
284 		return;
285 
286 	dma_free_coherent(bus_master, buffer_desc->size, buffer_desc->cpu_addr,
287 			  buffer_desc->dma_addr);
288 }
289 EXPORT_SYMBOL_GPL(vsp1_isp_free_buffer);
290 
/**
 * vsp1_isp_start_streaming - Start processing VSPX jobs
 * @dev: The VSP1 struct device
 * @frame_end: The frame end callback description
 *
 * Start the VSPX and prepare for accepting buffer transfer job requests.
 * The caller is responsible for tracking the started state of the VSPX.
 * Attempting to start an already started VSPX instance is an error.
 *
 * Return: %0 on success or a negative error code on failure
 */
int vsp1_isp_start_streaming(struct device *dev,
			     struct vsp1_vspx_frame_end *frame_end)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	u32 value;
	int ret;

	if (!frame_end)
		return -EINVAL;

	/* Serialize against vsp1_isp_stop_streaming(); see struct docs. */
	guard(mutex)(&vspx_pipe->mutex);

	/* The spinlock protects 'enabled' against the irq-context job path. */
	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		if (vspx_pipe->enabled)
			return -EBUSY;
	}

	/*
	 * Safe to write unlocked here: job_run only reads these after
	 * 'enabled' is set to true below.
	 */
	vspx_pipe->vspx_frame_end = frame_end->vspx_frame_end;
	vspx_pipe->frame_end_data = frame_end->frame_end_data;

	/* Enable the VSP1 and prepare for streaming. */
	vsp1_pipeline_dump(pipe, "VSPX job");

	ret = vsp1_device_get(vsp1);
	if (ret < 0)
		return ret;

	/*
	 * Make sure VSPX is not active. This should never happen in normal
	 * usage
	 */
	value = vsp1_read(vsp1, VI6_CMD(0));
	if (value & VI6_CMD_STRCMD) {
		dev_err(vsp1->dev,
			"%s: Starting of WPF0 already reserved\n", __func__);
		ret = -EBUSY;
		goto error_put;
	}

	value = vsp1_read(vsp1, VI6_STATUS);
	if (value & VI6_STATUS_SYS_ACT(0)) {
		dev_err(vsp1->dev,
			"%s: WPF0 has not entered idle state\n", __func__);
		ret = -EBUSY;
		goto error_put;
	}

	/* Publish the enabled state only once the hardware checks passed. */
	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		vspx_pipe->enabled = true;
	}

	return 0;

error_put:
	vsp1_device_put(vsp1);
	return ret;
}
EXPORT_SYMBOL_GPL(vsp1_isp_start_streaming);
362 
/**
 * vsp1_isp_stop_streaming - Stop the VSPX
 * @dev: The VSP1 struct device
 *
 * Stop the VSPX operation by stopping the vsp1 pipeline and waiting for the
 * last frame in transfer, if any, to complete.
 *
 * The caller is responsible for tracking the stopped state of the VSPX.
 * Attempting to stop an already stopped VSPX instance is a nop.
 */
void vsp1_isp_stop_streaming(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;

	/* Serialize against vsp1_isp_start_streaming(). */
	guard(mutex)(&vspx_pipe->mutex);

	/* Clear 'enabled' first so job_run refuses new jobs from now on. */
	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		if (!vspx_pipe->enabled)
			return;

		vspx_pipe->enabled = false;
	}

	/* Waits for the in-flight frame, if any, to complete. */
	WARN_ON_ONCE(vsp1_pipeline_stop(pipe));

	vspx_pipe->vspx_frame_end = NULL;
	vsp1_dlm_reset(pipe->output->dlm);
	vsp1_device_put(vsp1);
}
EXPORT_SYMBOL_GPL(vsp1_isp_stop_streaming);
395 
/**
 * vsp1_isp_job_prepare - Prepare a new buffer transfer job
 * @dev: The VSP1 struct device
 * @job: The job description
 *
 * Prepare a new buffer transfer job by populating a display list that will be
 * later executed by a call to vsp1_isp_job_run(). All pending jobs must be
 * released after stopping the streaming operations with a call to
 * vsp1_isp_job_release().
 *
 * In order for the VSPX to accept new jobs to prepare the VSPX must have been
 * started.
 *
 * Return: %0 on success or a negative error code on failure
 */
int vsp1_isp_job_prepare(struct device *dev, struct vsp1_isp_job_desc *job)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	const struct v4l2_pix_format_mplane *pix_mp;
	struct vsp1_dl_list *second_dl = NULL;
	struct vsp1_dl_body *dlb;
	struct vsp1_dl_list *dl;
	int ret;

	/*
	 * Transfer the buffers described in the job: an optional ConfigDMA
	 * parameters buffer and a RAW image.
	 */

	job->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!job->dl)
		return -ENOMEM;

	dl = job->dl;
	dlb = vsp1_dl_list_get_body0(dl);

	/* Configure IIF routing and enable IIF function. */
	vsp1_entity_route_setup(pipe->iif, pipe, dlb);
	vsp1_entity_configure_stream(pipe->iif, pipe->iif->state, pipe,
				     dl, dlb);

	/* Configure WPF0 to enable RPF0 as source. */
	vsp1_entity_route_setup(&pipe->output->entity, pipe, dlb);
	vsp1_entity_configure_stream(&pipe->output->entity,
				     pipe->output->entity.state, pipe,
				     dl, dlb);

	if (job->config.pairs) {
		/*
		 * Writing less than 17 pairs corrupts the output images ( < 16
		 * pairs) or freezes the VSPX operations (= 16 pairs). Only
		 * allow more than 16 pairs to be written.
		 */
		if (job->config.pairs <= 16) {
			ret = -EINVAL;
			goto error_put_dl;
		}

		/*
		 * Configure RPF0 for ConfigDMA data. Transfer the number of
		 * configuration pairs plus 2 words for the header.
		 */
		ret = vsp1_vspx_pipeline_configure(vsp1, job->config.mem,
						   V4L2_META_FMT_GENERIC_8,
						   job->config.pairs * 2 + 2, 1,
						   job->config.pairs * 2 + 2,
						   VSPX_IIF_SINK_PAD_CONFIG,
						   dl, dlb);
		if (ret)
			goto error_put_dl;

		/*
		 * The image transfer needs its own display list, chained to
		 * the ConfigDMA one below so both run in a single job.
		 */
		second_dl = vsp1_dl_list_get(pipe->output->dlm);
		if (!second_dl) {
			ret = -ENOMEM;
			goto error_put_dl;
		}

		dl = second_dl;
		dlb = vsp1_dl_list_get_body0(dl);
	}

	/* Configure RPF0 for RAW image transfer. */
	pix_mp = &job->img.fmt;
	ret = vsp1_vspx_pipeline_configure(vsp1, job->img.mem,
					   pix_mp->pixelformat,
					   pix_mp->width, pix_mp->height,
					   pix_mp->plane_fmt[0].bytesperline,
					   VSPX_IIF_SINK_PAD_IMG, dl, dlb);
	if (ret)
		goto error_put_dl;

	/* Chain only after full success; job->dl then owns second_dl. */
	if (second_dl)
		vsp1_dl_list_add_chain(job->dl, second_dl);

	return 0;

error_put_dl:
	/* second_dl is not yet chained here, so it must be put separately. */
	if (second_dl)
		vsp1_dl_list_put(second_dl);
	vsp1_dl_list_put(job->dl);
	job->dl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_prepare);
502 
/**
 * vsp1_isp_job_run - Run a buffer transfer job
 * @dev: The VSP1 struct device
 * @job: The job to be run
 *
 * Run the display list contained in the job description provided by the caller.
 * The job must have been prepared with a call to vsp1_isp_job_prepare() and
 * the job's display list shall be valid.
 *
 * Jobs can be run only on VSPX instances which have been started. Requests
 * to run a job after the VSPX has been stopped return -EINVAL and the job
 * resources shall be released by the caller with vsp1_isp_job_release().
 * When a job is run successfully all the resources acquired by
 * vsp1_isp_job_prepare() are released by this function and no further action
 * is required to the caller.
 *
 * Context: may be called from the ISP irq context; only spinlocks are taken.
 *
 * Return: %0 on success or a negative error code on failure
 */
int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	u32 value;

	/* Make sure VSPX is not busy processing a frame. */
	value = vsp1_read(vsp1, VI6_CMD(0));
	if (value) {
		dev_err(vsp1->dev,
			"%s: Starting of WPF0 already reserved\n", __func__);
		return -EBUSY;
	}

	scoped_guard(spinlock_irqsave, &vspx_pipe->lock) {
		/*
		 * If a new job is scheduled when the VSPX is stopped, do not
		 * run it.
		 */
		if (!vspx_pipe->enabled)
			return -EINVAL;

		/* Committed under the lock so stop cannot race the hand-off. */
		vsp1_dl_list_commit(job->dl, 0);

		/*
		 * The display list is now under control of the display list
		 * manager and will be released automatically when the job
		 * completes.
		 */
		job->dl = NULL;
	}

	scoped_guard(spinlock_irqsave, &pipe->irqlock) {
		vsp1_pipeline_run(pipe);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_run);
561 
/**
 * vsp1_isp_job_release - Release a non processed transfer job
 * @dev: The VSP1 struct device
 * @job: The job to release
 *
 * Release a job prepared by a call to vsp1_isp_job_prepare() and not yet
 * run. All pending jobs shall be released after streaming has been stopped.
 */
void vsp1_isp_job_release(struct device *dev,
			  struct vsp1_isp_job_desc *job)
{
	/* Drops the display list reference taken by vsp1_isp_job_prepare(). */
	vsp1_dl_list_put(job->dl);
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_release);
576 
577 /* -----------------------------------------------------------------------------
578  * Initialization and cleanup
579  */
580 
/*
 * Allocate the VSPX context and wire the fixed RPF0 -> IIF -> WPF0 pipeline.
 *
 * Return: 0 on success, -ENOMEM if the context allocation fails.
 */
int vsp1_vspx_init(struct vsp1_device *vsp1)
{
	struct vsp1_vspx_pipeline *vspx_pipe;
	struct vsp1_pipeline *pipe;

	vsp1->vspx = devm_kzalloc(vsp1->dev, sizeof(*vsp1->vspx), GFP_KERNEL);
	if (!vsp1->vspx)
		return -ENOMEM;

	vsp1->vspx->vsp1 = vsp1;

	vspx_pipe = &vsp1->vspx->pipe;
	vspx_pipe->enabled = false;

	pipe = &vspx_pipe->pipe;

	vsp1_pipeline_init(pipe);

	/* The VSPX pipeline uses a single, pre-allocated partition. */
	pipe->partitions = 1;
	pipe->part_table = &vspx_pipe->partition;
	pipe->interlaced = false;
	pipe->frame_end = vsp1_vspx_pipeline_frame_end;

	mutex_init(&vspx_pipe->mutex);
	spin_lock_init(&vspx_pipe->lock);

	/*
	 * Initialize RPF0 as input for VSPX and use it unconditionally for
	 * now.
	 */
	pipe->inputs[0] = vsp1->rpf[0];
	pipe->inputs[0]->entity.pipe = pipe;
	pipe->inputs[0]->entity.sink = &vsp1->iif->entity;
	list_add_tail(&pipe->inputs[0]->entity.list_pipe, &pipe->entities);

	/* The IIF feeds WPF0's sink pad. */
	pipe->iif = &vsp1->iif->entity;
	pipe->iif->pipe = pipe;
	pipe->iif->sink = &vsp1->wpf[0]->entity;
	pipe->iif->sink_pad = RWPF_PAD_SINK;
	list_add_tail(&pipe->iif->list_pipe, &pipe->entities);

	/* WPF0 is the pipeline output. */
	pipe->output = vsp1->wpf[0];
	pipe->output->entity.pipe = pipe;
	list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);

	return 0;
}
628 
/* Tear down the VSPX context; memory is devm-managed, only the mutex needs
 * explicit destruction.
 */
void vsp1_vspx_cleanup(struct vsp1_device *vsp1)
{
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;

	mutex_destroy(&vspx_pipe->mutex);
}
635