xref: /linux/drivers/gpu/ipu-v3/ipu-image-convert.c (revision 96e9d754b35e87a5be2de7dce3c810ffdd769c84)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2012-2016 Mentor Graphics Inc.
4  *
5  * Queued image conversion support, with tiling and rotation.
6  */
7 
8 #include <linux/interrupt.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/math.h>
11 
12 #include <video/imx-ipu-image-convert.h>
13 
14 #include "ipu-prv.h"
15 
16 /*
17  * The IC Resizer has a restriction that the output frame from the
18  * resizer must be 1024 or less in both width (pixels) and height
19  * (lines).
20  *
21  * The image converter attempts to split up a conversion when
22  * the desired output (converted) frame resolution exceeds the
23  * IC resizer limit of 1024 in either dimension.
24  *
25  * If either dimension of the output frame exceeds the limit, the
26  * dimension is split into up to 4 stripes, for a maximum of
27  * 4*4 or 16 tiles. A conversion is then carried out for each
28  * tile (but taking care to pass the full frame stride length to
29  * the DMA channel's parameter memory!). IDMA double-buffering is used
30  * to convert each tile back-to-back when possible (see note below
31  * when double_buffering boolean is set).
32  *
33  * Note that the input frame must be split up into the same number
34  * of tiles as the output frame:
35  *
36  *                       +---------+-----+
37  *   +-----+---+         |  A      | B   |
38  *   | A   | B |         |         |     |
39  *   +-----+---+   -->   +---------+-----+
40  *   | C   | D |         |  C      | D   |
41  *   +-----+---+         |         |     |
42  *                       +---------+-----+
43  *
44  * Clockwise 90° rotations are handled by first rescaling into a
45  * reusable temporary tile buffer and then rotating with the 8x8
46  * block rotator, writing to the correct destination:
47  *
48  *                                         +-----+-----+
49  *                                         |     |     |
50  *   +-----+---+         +---------+       | C   | A   |
51  *   | A   | B |         | A,B  |  |       |     |     |
52  *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
53  *   | C   | D |         +---------+       +-----+-----+
54  *   +-----+---+                           | D   | B   |
55  *                                         |     |     |
56  *                                         +-----+-----+
57  *
58  * If the 8x8 block rotator is used, horizontal or vertical flipping
59  * is done during the rotation step, otherwise flipping is done
60  * during the scaling step.
61  * With rotation or flipping, tile order changes between input and
62  * output image. Tiles are numbered row major from top left to bottom
63  * right for both input and output image.
64  */
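
/*
 * A rough example of the tile count decision: scaling to a 1920x1080
 * output exceeds the 1024 pixel resizer limit in both dimensions, so
 * the conversion is carried out as at least 2 columns x 2 rows = 4
 * tiles, each no larger than 1024x1024 at the resizer output.
 *
 * A minimal sketch of how a caller (e.g. a mem2mem driver) typically
 * drives the queued conversion API. The exact prototypes live in
 * include/video/imx-ipu-image-convert.h; the variable names here are
 * illustrative only:
 *
 *   ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *                                   &in, &out, rot_mode,
 *                                   my_complete, my_priv);
 *   run->ctx = ctx;
 *   run->in_phys = in_dma_addr;
 *   run->out_phys = out_dma_addr;
 *   ipu_image_convert_queue(run);
 *   ...
 *   (my_complete() is then called from the irq bottom half once the
 *   last tile of the run has been written out)
 *   ipu_image_convert_unprepare(ctx);
 */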
65 
66 #define MAX_STRIPES_W    4
67 #define MAX_STRIPES_H    4
68 #define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
69 
70 #define MIN_W     16
71 #define MIN_H     8
72 #define MAX_W     4096
73 #define MAX_H     4096
74 
75 enum ipu_image_convert_type {
76 	IMAGE_CONVERT_IN = 0,
77 	IMAGE_CONVERT_OUT,
78 };
79 
80 struct ipu_image_convert_dma_buf {
81 	void          *virt;
82 	dma_addr_t    phys;
83 	unsigned long len;
84 };
85 
86 struct ipu_image_convert_dma_chan {
87 	int in;
88 	int out;
89 	int rot_in;
90 	int rot_out;
91 	int vdi_in_p;
92 	int vdi_in;
93 	int vdi_in_n;
94 };
95 
96 /* dimensions of one tile */
97 struct ipu_image_tile {
98 	u32 width;
99 	u32 height;
100 	u32 left;
101 	u32 top;
102 	/* size and strides are in bytes */
103 	u32 size;
104 	u32 stride;
105 	u32 rot_stride;
106 	/* start Y or packed offset of this tile */
107 	u32 offset;
108 	/* offset from start to tile in U plane, for planar formats */
109 	u32 u_off;
110 	/* offset from start to tile in V plane, for planar formats */
111 	u32 v_off;
112 };
113 
114 struct ipu_image_convert_image {
115 	struct ipu_image base;
116 	enum ipu_image_convert_type type;
117 
118 	const struct ipu_image_pixfmt *fmt;
119 	unsigned int stride;
120 
121 	/* # of rows (horizontal stripes) if dest height is > 1024 */
122 	unsigned int num_rows;
123 	/* # of columns (vertical stripes) if dest width is > 1024 */
124 	unsigned int num_cols;
125 
126 	struct ipu_image_tile tile[MAX_TILES];
127 };
128 
129 struct ipu_image_pixfmt {
130 	u32	fourcc;        /* V4L2 fourcc */
131 	int     bpp;           /* total bpp */
132 	int     uv_width_dec;  /* decimation in width for U/V planes */
133 	int     uv_height_dec; /* decimation in height for U/V planes */
134 	bool    planar;        /* planar format */
135 	bool    uv_swapped;    /* U and V planes are swapped */
136 	bool    uv_packed;     /* partial planar (U and V in same plane) */
137 };
138 
139 struct ipu_image_convert_ctx;
140 struct ipu_image_convert_chan;
141 struct ipu_image_convert_priv;
142 
143 enum eof_irq_mask {
144 	EOF_IRQ_IN      = BIT(0),
145 	EOF_IRQ_ROT_IN  = BIT(1),
146 	EOF_IRQ_OUT     = BIT(2),
147 	EOF_IRQ_ROT_OUT = BIT(3),
148 };
149 
150 #define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
151 #define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT |	\
152 			      EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
153 
154 struct ipu_image_convert_ctx {
155 	struct ipu_image_convert_chan *chan;
156 
157 	ipu_image_convert_cb_t complete;
158 	void *complete_context;
159 
160 	/* Source/destination image data and rotation mode */
161 	struct ipu_image_convert_image in;
162 	struct ipu_image_convert_image out;
163 	struct ipu_ic_csc csc;
164 	enum ipu_rotate_mode rot_mode;
165 	u32 downsize_coeff_h;
166 	u32 downsize_coeff_v;
167 	u32 image_resize_coeff_h;
168 	u32 image_resize_coeff_v;
169 	u32 resize_coeffs_h[MAX_STRIPES_W];
170 	u32 resize_coeffs_v[MAX_STRIPES_H];
171 
172 	/* intermediate buffer for rotation */
173 	struct ipu_image_convert_dma_buf rot_intermediate[2];
174 
175 	/* current buffer number for double buffering */
176 	int cur_buf_num;
177 
178 	bool aborting;
179 	struct completion aborted;
180 
181 	/* can we use double-buffering for this conversion operation? */
182 	bool double_buffering;
183 	/* num_rows * num_cols */
184 	unsigned int num_tiles;
185 	/* next tile to process */
186 	unsigned int next_tile;
187 	/* where to place converted tile in dest image */
188 	unsigned int out_tile_map[MAX_TILES];
189 
190 	/* mask of completed EOF irqs at every tile conversion */
191 	enum eof_irq_mask eof_mask;
192 
193 	struct list_head list;
194 };
195 
196 struct ipu_image_convert_chan {
197 	struct ipu_image_convert_priv *priv;
198 
199 	enum ipu_ic_task ic_task;
200 	const struct ipu_image_convert_dma_chan *dma_ch;
201 
202 	struct ipu_ic *ic;
203 	struct ipuv3_channel *in_chan;
204 	struct ipuv3_channel *out_chan;
205 	struct ipuv3_channel *rotation_in_chan;
206 	struct ipuv3_channel *rotation_out_chan;
207 
208 	/* the IPU end-of-frame irqs */
209 	int in_eof_irq;
210 	int rot_in_eof_irq;
211 	int out_eof_irq;
212 	int rot_out_eof_irq;
213 
214 	spinlock_t irqlock;
215 
216 	/* list of convert contexts */
217 	struct list_head ctx_list;
218 	/* queue of conversion runs */
219 	struct list_head pending_q;
220 	/* queue of completed runs */
221 	struct list_head done_q;
222 
223 	/* the current conversion run */
224 	struct ipu_image_convert_run *current_run;
225 };
226 
227 struct ipu_image_convert_priv {
228 	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
229 	struct ipu_soc *ipu;
230 };
231 
232 static const struct ipu_image_convert_dma_chan
233 image_convert_dma_chan[IC_NUM_TASKS] = {
234 	[IC_TASK_VIEWFINDER] = {
235 		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
236 		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
237 		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
238 		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
239 		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
240 		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
241 		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
242 	},
243 	[IC_TASK_POST_PROCESSOR] = {
244 		.in = IPUV3_CHANNEL_MEM_IC_PP,
245 		.out = IPUV3_CHANNEL_IC_PP_MEM,
246 		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
247 		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
248 	},
249 };
250 
251 static const struct ipu_image_pixfmt image_convert_formats[] = {
252 	{
253 		.fourcc	= V4L2_PIX_FMT_RGB565,
254 		.bpp    = 16,
255 	}, {
256 		.fourcc	= V4L2_PIX_FMT_RGB24,
257 		.bpp    = 24,
258 	}, {
259 		.fourcc	= V4L2_PIX_FMT_BGR24,
260 		.bpp    = 24,
261 	}, {
262 		.fourcc	= V4L2_PIX_FMT_RGB32,
263 		.bpp    = 32,
264 	}, {
265 		.fourcc	= V4L2_PIX_FMT_BGR32,
266 		.bpp    = 32,
267 	}, {
268 		.fourcc	= V4L2_PIX_FMT_XRGB32,
269 		.bpp    = 32,
270 	}, {
271 		.fourcc	= V4L2_PIX_FMT_XBGR32,
272 		.bpp    = 32,
273 	}, {
274 		.fourcc	= V4L2_PIX_FMT_BGRX32,
275 		.bpp    = 32,
276 	}, {
277 		.fourcc	= V4L2_PIX_FMT_RGBX32,
278 		.bpp    = 32,
279 	}, {
280 		.fourcc	= V4L2_PIX_FMT_YUYV,
281 		.bpp    = 16,
282 		.uv_width_dec = 2,
283 		.uv_height_dec = 1,
284 	}, {
285 		.fourcc	= V4L2_PIX_FMT_UYVY,
286 		.bpp    = 16,
287 		.uv_width_dec = 2,
288 		.uv_height_dec = 1,
289 	}, {
290 		.fourcc	= V4L2_PIX_FMT_YUV420,
291 		.bpp    = 12,
292 		.planar = true,
293 		.uv_width_dec = 2,
294 		.uv_height_dec = 2,
295 	}, {
296 		.fourcc	= V4L2_PIX_FMT_YVU420,
297 		.bpp    = 12,
298 		.planar = true,
299 		.uv_width_dec = 2,
300 		.uv_height_dec = 2,
301 		.uv_swapped = true,
302 	}, {
303 		.fourcc = V4L2_PIX_FMT_NV12,
304 		.bpp    = 12,
305 		.planar = true,
306 		.uv_width_dec = 2,
307 		.uv_height_dec = 2,
308 		.uv_packed = true,
309 	}, {
310 		.fourcc = V4L2_PIX_FMT_YUV422P,
311 		.bpp    = 16,
312 		.planar = true,
313 		.uv_width_dec = 2,
314 		.uv_height_dec = 1,
315 	}, {
316 		.fourcc = V4L2_PIX_FMT_NV16,
317 		.bpp    = 16,
318 		.planar = true,
319 		.uv_width_dec = 2,
320 		.uv_height_dec = 1,
321 		.uv_packed = true,
322 	},
323 };
324 
325 static const struct ipu_image_pixfmt *get_format(u32 fourcc)
326 {
327 	const struct ipu_image_pixfmt *ret = NULL;
328 	unsigned int i;
329 
330 	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
331 		if (image_convert_formats[i].fourcc == fourcc) {
332 			ret = &image_convert_formats[i];
333 			break;
334 		}
335 	}
336 
337 	return ret;
338 }
339 
340 static void dump_format(struct ipu_image_convert_ctx *ctx,
341 			struct ipu_image_convert_image *ic_image)
342 {
343 	struct ipu_image_convert_chan *chan = ctx->chan;
344 	struct ipu_image_convert_priv *priv = chan->priv;
345 
346 	dev_dbg(priv->ipu->dev,
347 		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
348 		chan->ic_task, ctx,
349 		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
350 		ic_image->base.pix.width, ic_image->base.pix.height,
351 		ic_image->num_cols, ic_image->num_rows,
352 		ic_image->fmt->fourcc & 0xff,
353 		(ic_image->fmt->fourcc >> 8) & 0xff,
354 		(ic_image->fmt->fourcc >> 16) & 0xff,
355 		(ic_image->fmt->fourcc >> 24) & 0xff);
356 }
357 
358 static void free_dma_buf(struct ipu_image_convert_priv *priv,
359 			 struct ipu_image_convert_dma_buf *buf)
360 {
361 	if (buf->virt)
362 		dma_free_coherent(priv->ipu->dev,
363 				  buf->len, buf->virt, buf->phys);
364 	buf->virt = NULL;
365 	buf->phys = 0;
366 }
367 
368 static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
369 			 struct ipu_image_convert_dma_buf *buf,
370 			 int size)
371 {
372 	buf->len = PAGE_ALIGN(size);
373 	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
374 				       GFP_DMA | GFP_KERNEL);
375 	if (!buf->virt) {
376 		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
377 		return -ENOMEM;
378 	}
379 
380 	return 0;
381 }
382 
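/*
 * Number of stripes needed to cover @dim with stripes of at most 1024
 * pixels, e.g. num_stripes(1024) == 1, num_stripes(1025) == 2,
 * num_stripes(4096) == 4.
 */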
383 static inline int num_stripes(int dim)
384 {
385 	return (dim - 1) / 1024 + 1;
386 }
387 
388 /*
389  * Calculate downsizing coefficients, which are the same for all tiles,
390  * and initial bilinear resizing coefficients, which are used to find the
391  * best seam positions.
392  * Also determine the number of tiles necessary to guarantee that no tile
393  * is larger than 1024 pixels in either dimension at the output and between
394  * IC downsizing and main processing sections.
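 *
 * For example (numbers purely illustrative): converting 1920x1080 to
 * 640x480 downsizes by 2 in both dimensions (downsize_coeff_h/v = 1)
 * to 960x540 and then bilinearly resizes to 640x480; since 960x540
 * and 640x480 both fit within 1024x1024, a single tile is enough.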
395  */
396 static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
397 					  struct ipu_image *in,
398 					  struct ipu_image *out)
399 {
400 	u32 downsized_width = in->rect.width;
401 	u32 downsized_height = in->rect.height;
402 	u32 downsize_coeff_v = 0;
403 	u32 downsize_coeff_h = 0;
404 	u32 resized_width = out->rect.width;
405 	u32 resized_height = out->rect.height;
406 	u32 resize_coeff_h;
407 	u32 resize_coeff_v;
408 	u32 cols;
409 	u32 rows;
410 
411 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
412 		resized_width = out->rect.height;
413 		resized_height = out->rect.width;
414 	}
415 
416 	/* Do not let invalid input lead to an endless loop below */
417 	if (WARN_ON(resized_width == 0 || resized_height == 0))
418 		return -EINVAL;
419 
420 	while (downsized_width >= resized_width * 2) {
421 		downsized_width >>= 1;
422 		downsize_coeff_h++;
423 	}
424 
425 	while (downsized_height >= resized_height * 2) {
426 		downsized_height >>= 1;
427 		downsize_coeff_v++;
428 	}
429 
430 	/*
431 	 * Calculate the bilinear resizing coefficients that could be used if
432 	 * we were converting with a single tile. The bottom right output pixel
433 	 * should sample as close as possible to the bottom right input pixel
434 	 * out of the decimator, but not overshoot it:
435 	 */
436 	resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
437 	resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
438 
439 	/*
440 	 * Both the output of the IC downsizing section before being passed to
441 	 * the IC main processing section and the final output of the IC main
442 	 * processing section must be <= 1024 pixels in both dimensions.
443 	 */
444 	cols = num_stripes(max_t(u32, downsized_width, resized_width));
445 	rows = num_stripes(max_t(u32, downsized_height, resized_height));
446 
447 	dev_dbg(ctx->chan->priv->ipu->dev,
448 		"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
449 		__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
450 		resize_coeff_v, cols, rows);
451 
452 	if (downsize_coeff_h > 2 || downsize_coeff_v  > 2 ||
453 	    resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
454 		return -EINVAL;
455 
456 	ctx->downsize_coeff_h = downsize_coeff_h;
457 	ctx->downsize_coeff_v = downsize_coeff_v;
458 	ctx->image_resize_coeff_h = resize_coeff_h;
459 	ctx->image_resize_coeff_v = resize_coeff_v;
460 	ctx->in.num_cols = cols;
461 	ctx->in.num_rows = rows;
462 
463 	return 0;
464 }
465 
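/* e.g. round_closest(19, 8) == 16 and round_closest(20, 8) == 24 */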
466 #define round_closest(x, y) round_down((x) + (y)/2, (y))
467 
468 /*
469  * Find the best aligned seam position for the given column / row index.
470  * Rotation and image offsets are out of scope.
471  *
472  * @index: column / row index, used to calculate valid interval
473  * @in_edge: input right / bottom edge
474  * @out_edge: output right / bottom edge
475  * @in_align: input alignment, either horizontal 8-byte line start address
476  *            alignment, or pixel alignment due to image format
477  * @out_align: output alignment, either horizontal 8-byte line start address
478  *             alignment, or pixel alignment due to image format or rotator
479  *             block size
480  * @in_burst: horizontal input burst size in case of horizontal flip
481  * @out_burst: horizontal output burst size or rotator block size
482  * @downsize_coeff: downsizing section coefficient
483  * @resize_coeff: main processing section resizing coefficient
484  * @_in_seam: aligned input seam position return value
485  * @_out_seam: aligned output seam position return value
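 *
 * As an illustration of the fixed point math used below: with
 * downsize_coeff = 0 and resize_coeff = 4096 (a 2x upscale), an output
 * seam candidate at out_pos = 512 maps to in_pos = 512 * 4096 =
 * 2097152 in 19.13 fixed point, i.e. exactly input pixel 256.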
486  */
487 static void find_best_seam(struct ipu_image_convert_ctx *ctx,
488 			   unsigned int index,
489 			   unsigned int in_edge,
490 			   unsigned int out_edge,
491 			   unsigned int in_align,
492 			   unsigned int out_align,
493 			   unsigned int in_burst,
494 			   unsigned int out_burst,
495 			   unsigned int downsize_coeff,
496 			   unsigned int resize_coeff,
497 			   u32 *_in_seam,
498 			   u32 *_out_seam)
499 {
500 	struct device *dev = ctx->chan->priv->ipu->dev;
501 	unsigned int out_pos;
502 	/* Input / output seam position candidates */
503 	unsigned int out_seam = 0;
504 	unsigned int in_seam = 0;
505 	unsigned int min_diff = UINT_MAX;
506 	unsigned int out_start;
507 	unsigned int out_end;
508 	unsigned int in_start;
509 	unsigned int in_end;
510 
511 	/* Start within 1024 pixels of the right / bottom edge */
512 	out_start = max_t(int, index * out_align, out_edge - 1024);
513 	/* End before having to add more columns to the left / rows above */
514 	out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
515 
516 	/*
517 	 * Limit input seam position to make sure that the downsized input tile
518 	 * to the right or bottom does not exceed 1024 pixels.
519 	 */
520 	in_start = max_t(int, index * in_align,
521 			 in_edge - (1024 << downsize_coeff));
522 	in_end = min_t(unsigned int, in_edge,
523 		       index * (1024 << downsize_coeff) + 1);
524 
525 	/*
526 	 * Output tiles must start at a multiple of 8 bytes horizontally, or
527 	 * at an even line vertically, depending on the pixel format.
528 	 * Only consider output aligned positions for the seam.
529 	 */
530 	out_start = round_up(out_start, out_align);
531 	for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
532 		unsigned int in_pos;
533 		unsigned int in_pos_aligned;
534 		unsigned int in_pos_rounded;
535 		unsigned int diff;
536 
537 		/*
538 		 * Tiles in the rightmost column / bottom row may not be allowed to
539 		 * overshoot horizontally / vertically. out_burst may be the
540 		 * actual DMA burst size, or the rotator block size.
541 		 */
542 		if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
543 			continue;
544 
545 		/*
546 		 * Input sample position, corresponding to out_pos, 19.13 fixed
547 		 * point.
548 		 */
549 		in_pos = (out_pos * resize_coeff) << downsize_coeff;
550 		/*
551 		 * The closest input sample position that we could actually
552 		 * start the input tile at, 19.13 fixed point.
553 		 */
554 		in_pos_aligned = round_closest(in_pos, 8192U * in_align);
555 		/* Convert 19.13 fixed point to integer */
556 		in_pos_rounded = in_pos_aligned / 8192U;
557 
558 		if (in_pos_rounded < in_start)
559 			continue;
560 		if (in_pos_rounded >= in_end)
561 			break;
562 
563 		if ((in_burst > 1) &&
564 		    (in_edge - in_pos_rounded) % in_burst)
565 			continue;
566 
567 		diff = abs_diff(in_pos, in_pos_aligned);
568 		if (diff < min_diff) {
569 			in_seam = in_pos_rounded;
570 			out_seam = out_pos;
571 			min_diff = diff;
572 		}
573 	}
574 
575 	*_out_seam = out_seam;
576 	*_in_seam = in_seam;
577 
578 	dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
579 		__func__, out_seam, out_align, out_start, out_end,
580 		in_seam, in_align, in_start, in_end, min_diff / 8192,
581 		DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
582 }
583 
584 /*
585  * Tile left edges are required to be aligned to multiples of 8 bytes
586  * by the IDMAC.
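 * For example this evaluates to 4 pixels (8 bytes) for 16-bit RGB565,
 * 8 pixels for partial planar NV12, and 16 pixels for fully planar
 * YUV420, so that the subsampled chroma lines also start at 8-byte
 * aligned addresses.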
587  */
588 static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
589 {
590 	if (fmt->planar)
591 		return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
592 	else
593 		return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
594 }
595 
596 /*
597  * Tile top edge alignment is only limited by chroma subsampling.
598  */
599 static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
600 {
601 	return fmt->uv_height_dec > 1 ? 2 : 1;
602 }
603 
604 static inline u32 tile_width_align(enum ipu_image_convert_type type,
605 				   const struct ipu_image_pixfmt *fmt,
606 				   enum ipu_rotate_mode rot_mode)
607 {
608 	if (type == IMAGE_CONVERT_IN) {
609 		/*
610 		 * The IC burst reads 8 pixels at a time. Reading beyond the
611 		 * end of the line is usually acceptable. Those pixels are
612 		 * ignored, unless the IC has to write the scaled line in
613 		 * reverse.
614 		 */
615 		return (!ipu_rot_mode_is_irt(rot_mode) &&
616 			(rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
617 	}
618 
619 	/*
620 	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
621 	 * formats to guarantee 8-byte aligned line start addresses in the
622 	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
623 	 * for all other formats.
624 	 */
625 	return (ipu_rot_mode_is_irt(rot_mode) &&
626 		fmt->planar && !fmt->uv_packed) ?
627 		8 * fmt->uv_width_dec : 8;
628 }
629 
630 static inline u32 tile_height_align(enum ipu_image_convert_type type,
631 				    const struct ipu_image_pixfmt *fmt,
632 				    enum ipu_rotate_mode rot_mode)
633 {
634 	if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
635 		return 2;
636 
637 	/*
638 	 * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
639 	 * formats to guarantee 8-byte aligned line start addresses in the
640 	 * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
641 	 * for all other formats.
642 	 */
643 	return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
644 }
645 
646 /*
647  * Fill in left position and width for all tiles in an input column, and
648  * for all corresponding output tiles. If the 90° rotator is used, the output
649  * tiles are in a row, and output tile top position and height are set.
650  */
651 static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
652 			     unsigned int col,
653 			     struct ipu_image_convert_image *in,
654 			     unsigned int in_left, unsigned int in_width,
655 			     struct ipu_image_convert_image *out,
656 			     unsigned int out_left, unsigned int out_width)
657 {
658 	unsigned int row, tile_idx;
659 	struct ipu_image_tile *in_tile, *out_tile;
660 
661 	for (row = 0; row < in->num_rows; row++) {
662 		tile_idx = in->num_cols * row + col;
663 		in_tile = &in->tile[tile_idx];
664 		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
665 
666 		in_tile->left = in_left;
667 		in_tile->width = in_width;
668 
669 		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
670 			out_tile->top = out_left;
671 			out_tile->height = out_width;
672 		} else {
673 			out_tile->left = out_left;
674 			out_tile->width = out_width;
675 		}
676 	}
677 }
678 
679 /*
680  * Fill in top position and height for all tiles in an input row, and
681  * for all corresponding output tiles. If the 90° rotator is used, the output
682  * tiles are in a column, and output tile left position and width are set.
683  */
684 static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
685 			  struct ipu_image_convert_image *in,
686 			  unsigned int in_top, unsigned int in_height,
687 			  struct ipu_image_convert_image *out,
688 			  unsigned int out_top, unsigned int out_height)
689 {
690 	unsigned int col, tile_idx;
691 	struct ipu_image_tile *in_tile, *out_tile;
692 
693 	for (col = 0; col < in->num_cols; col++) {
694 		tile_idx = in->num_cols * row + col;
695 		in_tile = &in->tile[tile_idx];
696 		out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
697 
698 		in_tile->top = in_top;
699 		in_tile->height = in_height;
700 
701 		if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
702 			out_tile->left = out_top;
703 			out_tile->width = out_height;
704 		} else {
705 			out_tile->top = out_top;
706 			out_tile->height = out_height;
707 		}
708 	}
709 }
710 
711 /*
712  * Find the best horizontal and vertical seam positions to split into tiles.
713  * Minimize the fractional part of the input sampling position for the
714  * top / left pixels of each tile.
715  */
716 static void find_seams(struct ipu_image_convert_ctx *ctx,
717 		       struct ipu_image_convert_image *in,
718 		       struct ipu_image_convert_image *out)
719 {
720 	struct device *dev = ctx->chan->priv->ipu->dev;
721 	unsigned int resized_width = out->base.rect.width;
722 	unsigned int resized_height = out->base.rect.height;
723 	unsigned int col;
724 	unsigned int row;
725 	unsigned int in_left_align = tile_left_align(in->fmt);
726 	unsigned int in_top_align = tile_top_align(in->fmt);
727 	unsigned int out_left_align = tile_left_align(out->fmt);
728 	unsigned int out_top_align = tile_top_align(out->fmt);
729 	unsigned int out_width_align = tile_width_align(out->type, out->fmt,
730 							ctx->rot_mode);
731 	unsigned int out_height_align = tile_height_align(out->type, out->fmt,
732 							  ctx->rot_mode);
733 	unsigned int in_right = in->base.rect.width;
734 	unsigned int in_bottom = in->base.rect.height;
735 	unsigned int out_right = out->base.rect.width;
736 	unsigned int out_bottom = out->base.rect.height;
737 	unsigned int flipped_out_left;
738 	unsigned int flipped_out_top;
739 
740 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
741 		/* Switch width/height and align top left to IRT block size */
742 		resized_width = out->base.rect.height;
743 		resized_height = out->base.rect.width;
744 		out_left_align = out_height_align;
745 		out_top_align = out_width_align;
746 		out_width_align = out_left_align;
747 		out_height_align = out_top_align;
748 		out_right = out->base.rect.height;
749 		out_bottom = out->base.rect.width;
750 	}
751 
752 	for (col = in->num_cols - 1; col > 0; col--) {
753 		bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
754 					  !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
755 		bool allow_out_overshoot = (col < in->num_cols - 1) &&
756 					   !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
757 		unsigned int in_left;
758 		unsigned int out_left;
759 
760 		/*
761 		 * Align input width to burst length if the scaling step flips
762 		 * horizontally.
763 		 */
764 
765 		find_best_seam(ctx, col,
766 			       in_right, out_right,
767 			       in_left_align, out_left_align,
768 			       allow_in_overshoot ? 1 : 8 /* burst length */,
769 			       allow_out_overshoot ? 1 : out_width_align,
770 			       ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
771 			       &in_left, &out_left);
772 
773 		if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
774 			flipped_out_left = resized_width - out_right;
775 		else
776 			flipped_out_left = out_left;
777 
778 		fill_tile_column(ctx, col, in, in_left, in_right - in_left,
779 				 out, flipped_out_left, out_right - out_left);
780 
781 		dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
782 			in_left, in_right - in_left,
783 			flipped_out_left, out_right - out_left);
784 
785 		in_right = in_left;
786 		out_right = out_left;
787 	}
788 
789 	flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
790 			   resized_width - out_right : 0;
791 
792 	fill_tile_column(ctx, 0, in, 0, in_right,
793 			 out, flipped_out_left, out_right);
794 
795 	dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
796 		in_right, flipped_out_left, out_right);
797 
798 	for (row = in->num_rows - 1; row > 0; row--) {
799 		bool allow_overshoot = row < in->num_rows - 1;
800 		unsigned int in_top;
801 		unsigned int out_top;
802 
803 		find_best_seam(ctx, row,
804 			       in_bottom, out_bottom,
805 			       in_top_align, out_top_align,
806 			       1, allow_overshoot ? 1 : out_height_align,
807 			       ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
808 			       &in_top, &out_top);
809 
810 		if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
811 		    ipu_rot_mode_is_irt(ctx->rot_mode))
812 			flipped_out_top = resized_height - out_bottom;
813 		else
814 			flipped_out_top = out_top;
815 
816 		fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
817 			      out, flipped_out_top, out_bottom - out_top);
818 
819 		dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
820 			in_top, in_bottom - in_top,
821 			flipped_out_top, out_bottom - out_top);
822 
823 		in_bottom = in_top;
824 		out_bottom = out_top;
825 	}
826 
827 	if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
828 	    ipu_rot_mode_is_irt(ctx->rot_mode))
829 		flipped_out_top = resized_height - out_bottom;
830 	else
831 		flipped_out_top = 0;
832 
833 	fill_tile_row(ctx, 0, in, 0, in_bottom,
834 		      out, flipped_out_top, out_bottom);
835 
836 	dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
837 		in_bottom, flipped_out_top, out_bottom);
838 }
839 
840 static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
841 				struct ipu_image_convert_image *image)
842 {
843 	struct ipu_image_convert_chan *chan = ctx->chan;
844 	struct ipu_image_convert_priv *priv = chan->priv;
845 	unsigned int max_width = 1024;
846 	unsigned int max_height = 1024;
847 	unsigned int i;
848 
849 	if (image->type == IMAGE_CONVERT_IN) {
850 		/* Up to 4096x4096 input tile size */
851 		max_width <<= ctx->downsize_coeff_h;
852 		max_height <<= ctx->downsize_coeff_v;
853 	}
854 
855 	for (i = 0; i < ctx->num_tiles; i++) {
856 		struct ipu_image_tile *tile;
857 		const unsigned int row = i / image->num_cols;
858 		const unsigned int col = i % image->num_cols;
859 
860 		if (image->type == IMAGE_CONVERT_OUT)
861 			tile = &image->tile[ctx->out_tile_map[i]];
862 		else
863 			tile = &image->tile[i];
864 
865 		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
866 			tile->width;
867 
868 		if (image->fmt->planar) {
869 			tile->stride = tile->width;
870 			tile->rot_stride = tile->height;
871 		} else {
872 			tile->stride =
873 				(image->fmt->bpp * tile->width) >> 3;
874 			tile->rot_stride =
875 				(image->fmt->bpp * tile->height) >> 3;
876 		}
877 
878 		dev_dbg(priv->ipu->dev,
879 			"task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
880 			chan->ic_task, ctx,
881 			image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
882 			row, col,
883 			tile->width, tile->height, tile->left, tile->top);
884 
885 		if (!tile->width || tile->width > max_width ||
886 		    !tile->height || tile->height > max_height) {
887 			dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
888 				image->type == IMAGE_CONVERT_IN ? "input" :
889 				"output", tile->width, tile->height);
890 			return -EINVAL;
891 		}
892 	}
893 
894 	return 0;
895 }
896 
897 /*
898  * Use the rotation transformation to find the tile coordinates
899  * (row, col) of a tile in the destination frame that corresponds
900  * to the given tile coordinates of a source frame. The destination
901  * coordinate is then converted to a tile index.
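 *
 * For example, with 2x2 tiles and a plain clockwise 90° rotation (no
 * flip), source tile [0,0] maps to destination tile index 1, i.e. the
 * top right tile, matching the diagram at the top of this file.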
902  */
903 static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
904 				int src_row, int src_col)
905 {
906 	struct ipu_image_convert_chan *chan = ctx->chan;
907 	struct ipu_image_convert_priv *priv = chan->priv;
908 	struct ipu_image_convert_image *s_image = &ctx->in;
909 	struct ipu_image_convert_image *d_image = &ctx->out;
910 	int dst_row, dst_col;
911 
912 	/* with no rotation it's a 1:1 mapping */
913 	if (ctx->rot_mode == IPU_ROTATE_NONE)
914 		return src_row * s_image->num_cols + src_col;
915 
916 	/*
917 	 * before doing the transform, first we have to translate
918 	 * source row,col for an origin in the center of s_image
919 	 */
920 	src_row = src_row * 2 - (s_image->num_rows - 1);
921 	src_col = src_col * 2 - (s_image->num_cols - 1);
922 
923 	/* do the rotation transform */
924 	if (ctx->rot_mode & IPU_ROT_BIT_90) {
925 		dst_col = -src_row;
926 		dst_row = src_col;
927 	} else {
928 		dst_col = src_col;
929 		dst_row = src_row;
930 	}
931 
932 	/* apply flip */
933 	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
934 		dst_col = -dst_col;
935 	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
936 		dst_row = -dst_row;
937 
938 	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
939 		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
940 
941 	/*
942 	 * finally translate dest row,col using an origin in upper
943 	 * left of d_image
944 	 */
945 	dst_row += d_image->num_rows - 1;
946 	dst_col += d_image->num_cols - 1;
947 	dst_row /= 2;
948 	dst_col /= 2;
949 
950 	return dst_row * d_image->num_cols + dst_col;
951 }
952 
953 /*
954  * Fill the out_tile_map[] with transformed destination tile indices.
955  */
956 static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
957 {
958 	struct ipu_image_convert_image *s_image = &ctx->in;
959 	unsigned int row, col, tile = 0;
960 
961 	for (row = 0; row < s_image->num_rows; row++) {
962 		for (col = 0; col < s_image->num_cols; col++) {
963 			ctx->out_tile_map[tile] =
964 				transform_tile_index(ctx, row, col);
965 			tile++;
966 		}
967 	}
968 }
969 
970 static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
971 				    struct ipu_image_convert_image *image)
972 {
973 	struct ipu_image_convert_chan *chan = ctx->chan;
974 	struct ipu_image_convert_priv *priv = chan->priv;
975 	const struct ipu_image_pixfmt *fmt = image->fmt;
976 	unsigned int row, col, tile = 0;
977 	u32 H, top, y_stride, uv_stride;
978 	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off;
979 	u32 y_row_off, y_col_off, y_off;
980 	u32 y_size, uv_size;
981 
982 	/* setup some convenience vars */
983 	H = image->base.pix.height;
984 
985 	y_stride = image->stride;
986 	uv_stride = y_stride / fmt->uv_width_dec;
987 	if (fmt->uv_packed)
988 		uv_stride *= 2;
989 
990 	y_size = H * y_stride;
991 	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
992 
993 	for (row = 0; row < image->num_rows; row++) {
994 		top = image->tile[tile].top;
995 		y_row_off = top * y_stride;
996 		uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
997 
998 		for (col = 0; col < image->num_cols; col++) {
999 			y_col_off = image->tile[tile].left;
1000 			uv_col_off = y_col_off / fmt->uv_width_dec;
1001 			if (fmt->uv_packed)
1002 				uv_col_off *= 2;
1003 
1004 			y_off = y_row_off + y_col_off;
1005 			uv_off = uv_row_off + uv_col_off;
1006 
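			/*
			 * The IDMAC U and V offsets are programmed relative
			 * to this tile's start address (phys0 + y_off), so
			 * step from y_off to the start of the U plane and
			 * then to the tile's chroma offset within it.
			 */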
1007 			u_off = y_size - y_off + uv_off;
1008 			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
1009 			if (fmt->uv_swapped)
1010 				swap(u_off, v_off);
1011 
1012 			image->tile[tile].offset = y_off;
1013 			image->tile[tile].u_off = u_off;
1014 			image->tile[tile++].v_off = v_off;
1015 
1016 			if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
1017 				dev_err(priv->ipu->dev,
1018 					"task %u: ctx %p: %s@[%d,%d]: "
1019 					"y_off %08x, u_off %08x, v_off %08x\n",
1020 					chan->ic_task, ctx,
1021 					image->type == IMAGE_CONVERT_IN ?
1022 					"Input" : "Output", row, col,
1023 					y_off, u_off, v_off);
1024 				return -EINVAL;
1025 			}
1026 		}
1027 	}
1028 
1029 	return 0;
1030 }
1031 
1032 static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
1033 				    struct ipu_image_convert_image *image)
1034 {
1035 	struct ipu_image_convert_chan *chan = ctx->chan;
1036 	struct ipu_image_convert_priv *priv = chan->priv;
1037 	const struct ipu_image_pixfmt *fmt = image->fmt;
1038 	unsigned int row, col, tile = 0;
1039 	u32 bpp, stride, offset;
1040 	u32 row_off, col_off;
1041 
1042 	/* setup some convenience vars */
1043 	stride = image->stride;
1044 	bpp = fmt->bpp;
1045 
1046 	for (row = 0; row < image->num_rows; row++) {
1047 		row_off = image->tile[tile].top * stride;
1048 
1049 		for (col = 0; col < image->num_cols; col++) {
1050 			col_off = (image->tile[tile].left * bpp) >> 3;
1051 
1052 			offset = row_off + col_off;
1053 
1054 			image->tile[tile].offset = offset;
1055 			image->tile[tile].u_off = 0;
1056 			image->tile[tile++].v_off = 0;
1057 
1058 			if (offset & 0x7) {
1059 				dev_err(priv->ipu->dev,
1060 					"task %u: ctx %p: %s@[%d,%d]: "
1061 					"phys %08x\n",
1062 					chan->ic_task, ctx,
1063 					image->type == IMAGE_CONVERT_IN ?
1064 					"Input" : "Output", row, col,
1065 					row_off + col_off);
1066 				return -EINVAL;
1067 			}
1068 		}
1069 	}
1070 
1071 	return 0;
1072 }
1073 
1074 static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
1075 			      struct ipu_image_convert_image *image)
1076 {
1077 	if (image->fmt->planar)
1078 		return calc_tile_offsets_planar(ctx, image);
1079 
1080 	return calc_tile_offsets_packed(ctx, image);
1081 }
1082 
1083 /*
1084  * Calculate the resizing ratio for the IC main processing section given input
1085  * size, fixed downsizing coefficient, and output size.
1086  * Either round to closest for the next tile's first pixel to minimize seams
1087  * and distortion (for all but right column / bottom row), or round down to
1088  * avoid sampling beyond the edges of the input image for this tile's last
1089  * pixel.
1090  * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
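 *
 * For example, upscaling a downsized input of 240 pixels to 480 output
 * pixels yields 8192 * 240 / 480 = 4096 (exactly 2x) when overshoot is
 * allowed, and 8192 * 239 / 479 = 4087 when rounding down.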
1091  */
1092 static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
1093 			     u32 output_size, bool allow_overshoot)
1094 {
1095 	u32 downsized = input_size >> downsize_coeff;
1096 
1097 	if (allow_overshoot)
1098 		return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
1099 	else
1100 		return 8192 * (downsized - 1) / (output_size - 1);
1101 }
1102 
1103 /*
1104  * Slightly modify resize coefficients per tile to hide the bilinear
1105  * interpolator reset at tile borders, shifting the right / bottom edge
1106  * by up to a half input pixel. This removes noticeable seams between
1107  * tiles at higher upscaling factors.
1108  */
1109 static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
1110 {
1111 	struct ipu_image_convert_chan *chan = ctx->chan;
1112 	struct ipu_image_convert_priv *priv = chan->priv;
1113 	struct ipu_image_tile *in_tile, *out_tile;
1114 	unsigned int col, row, tile_idx;
1115 	unsigned int last_output;
1116 
1117 	for (col = 0; col < ctx->in.num_cols; col++) {
1118 		bool closest = (col < ctx->in.num_cols - 1) &&
1119 			       !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
1120 		u32 resized_width;
1121 		u32 resize_coeff_h;
1122 		u32 in_width;
1123 
1124 		tile_idx = col;
1125 		in_tile = &ctx->in.tile[tile_idx];
1126 		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1127 
1128 		if (ipu_rot_mode_is_irt(ctx->rot_mode))
1129 			resized_width = out_tile->height;
1130 		else
1131 			resized_width = out_tile->width;
1132 
1133 		resize_coeff_h = calc_resize_coeff(in_tile->width,
1134 						   ctx->downsize_coeff_h,
1135 						   resized_width, closest);
1136 
1137 		dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
1138 			__func__, col, resize_coeff_h);
1139 
1140 		/*
1141 		 * With the horizontal scaling factor known, round up resized
1142 		 * width (output width or height) to burst size.
1143 		 */
1144 		resized_width = round_up(resized_width, 8);
1145 
1146 		/*
1147 		 * Calculate input width from the last accessed input pixel
1148 		 * given resized width and scaling coefficients. Round up to
1149 		 * burst size.
1150 		 */
1151 		last_output = resized_width - 1;
1152 		if (closest && ((last_output * resize_coeff_h) % 8192))
1153 			last_output++;
1154 		in_width = round_up(
1155 			(DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
1156 			<< ctx->downsize_coeff_h, 8);
1157 
1158 		for (row = 0; row < ctx->in.num_rows; row++) {
1159 			tile_idx = row * ctx->in.num_cols + col;
1160 			in_tile = &ctx->in.tile[tile_idx];
1161 			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1162 
1163 			if (ipu_rot_mode_is_irt(ctx->rot_mode))
1164 				out_tile->height = resized_width;
1165 			else
1166 				out_tile->width = resized_width;
1167 
1168 			in_tile->width = in_width;
1169 		}
1170 
1171 		ctx->resize_coeffs_h[col] = resize_coeff_h;
1172 	}
1173 
1174 	for (row = 0; row < ctx->in.num_rows; row++) {
1175 		bool closest = (row < ctx->in.num_rows - 1) &&
1176 			       !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
1177 		u32 resized_height;
1178 		u32 resize_coeff_v;
1179 		u32 in_height;
1180 
1181 		tile_idx = row * ctx->in.num_cols;
1182 		in_tile = &ctx->in.tile[tile_idx];
1183 		out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1184 
1185 		if (ipu_rot_mode_is_irt(ctx->rot_mode))
1186 			resized_height = out_tile->width;
1187 		else
1188 			resized_height = out_tile->height;
1189 
1190 		resize_coeff_v = calc_resize_coeff(in_tile->height,
1191 						   ctx->downsize_coeff_v,
1192 						   resized_height, closest);
1193 
1194 		dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
1195 			__func__, row, resize_coeff_v);
1196 
1197 		/*
1198 		 * With the vertical scaling factor known, round up resized
1199 		 * height (output width or height) to IDMAC limitations.
1200 		 */
1201 		resized_height = round_up(resized_height, 2);
1202 
1203 		/*
1204 		 * Calculate input height from the last accessed input pixel
1205 		 * given resized height and scaling coefficients. Align to
1206 		 * IDMAC restrictions.
1207 		 */
1208 		last_output = resized_height - 1;
1209 		if (closest && ((last_output * resize_coeff_v) % 8192))
1210 			last_output++;
1211 		in_height = round_up(
1212 			(DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
1213 			<< ctx->downsize_coeff_v, 2);
1214 
1215 		for (col = 0; col < ctx->in.num_cols; col++) {
1216 			tile_idx = row * ctx->in.num_cols + col;
1217 			in_tile = &ctx->in.tile[tile_idx];
1218 			out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
1219 
1220 			if (ipu_rot_mode_is_irt(ctx->rot_mode))
1221 				out_tile->width = resized_height;
1222 			else
1223 				out_tile->height = resized_height;
1224 
1225 			in_tile->height = in_height;
1226 		}
1227 
1228 		ctx->resize_coeffs_v[row] = resize_coeff_v;
1229 	}
1230 }
1231 
1232 /*
1233  * return the number of runs in the given queue (pending_q or done_q)
1234  * for this context. hold irqlock when calling.
1235  */
1236 static int get_run_count(struct ipu_image_convert_ctx *ctx,
1237 			 struct list_head *q)
1238 {
1239 	struct ipu_image_convert_run *run;
1240 	int count = 0;
1241 
1242 	lockdep_assert_held(&ctx->chan->irqlock);
1243 
1244 	list_for_each_entry(run, q, list) {
1245 		if (run->ctx == ctx)
1246 			count++;
1247 	}
1248 
1249 	return count;
1250 }
1251 
1252 static void convert_stop(struct ipu_image_convert_run *run)
1253 {
1254 	struct ipu_image_convert_ctx *ctx = run->ctx;
1255 	struct ipu_image_convert_chan *chan = ctx->chan;
1256 	struct ipu_image_convert_priv *priv = chan->priv;
1257 
1258 	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
1259 		__func__, chan->ic_task, ctx, run);
1260 
1261 	/* disable IC tasks and the channels */
1262 	ipu_ic_task_disable(chan->ic);
1263 	ipu_idmac_disable_channel(chan->in_chan);
1264 	ipu_idmac_disable_channel(chan->out_chan);
1265 
1266 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1267 		ipu_idmac_disable_channel(chan->rotation_in_chan);
1268 		ipu_idmac_disable_channel(chan->rotation_out_chan);
1269 		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
1270 	}
1271 
1272 	ipu_ic_disable(chan->ic);
1273 }
1274 
1275 static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
1276 			       struct ipuv3_channel *channel,
1277 			       struct ipu_image_convert_image *image,
1278 			       enum ipu_rotate_mode rot_mode,
1279 			       bool rot_swap_width_height,
1280 			       unsigned int tile)
1281 {
1282 	struct ipu_image_convert_chan *chan = ctx->chan;
1283 	unsigned int burst_size;
1284 	u32 width, height, stride;
1285 	dma_addr_t addr0, addr1 = 0;
1286 	struct ipu_image tile_image;
1287 	unsigned int tile_idx[2];
1288 
1289 	if (image->type == IMAGE_CONVERT_OUT) {
1290 		tile_idx[0] = ctx->out_tile_map[tile];
1291 		tile_idx[1] = ctx->out_tile_map[1];
1292 	} else {
1293 		tile_idx[0] = tile;
1294 		tile_idx[1] = 1;
1295 	}
1296 
1297 	if (rot_swap_width_height) {
1298 		width = image->tile[tile_idx[0]].height;
1299 		height = image->tile[tile_idx[0]].width;
1300 		stride = image->tile[tile_idx[0]].rot_stride;
1301 		addr0 = ctx->rot_intermediate[0].phys;
1302 		if (ctx->double_buffering)
1303 			addr1 = ctx->rot_intermediate[1].phys;
1304 	} else {
1305 		width = image->tile[tile_idx[0]].width;
1306 		height = image->tile[tile_idx[0]].height;
1307 		stride = image->stride;
1308 		addr0 = image->base.phys0 +
1309 			image->tile[tile_idx[0]].offset;
1310 		if (ctx->double_buffering)
1311 			addr1 = image->base.phys0 +
1312 				image->tile[tile_idx[1]].offset;
1313 	}
1314 
1315 	ipu_cpmem_zero(channel);
1316 
1317 	memset(&tile_image, 0, sizeof(tile_image));
1318 	tile_image.pix.width = tile_image.rect.width = width;
1319 	tile_image.pix.height = tile_image.rect.height = height;
1320 	tile_image.pix.bytesperline = stride;
1321 	tile_image.pix.pixelformat =  image->fmt->fourcc;
1322 	tile_image.phys0 = addr0;
1323 	tile_image.phys1 = addr1;
1324 	if (image->fmt->planar && !rot_swap_width_height) {
1325 		tile_image.u_offset = image->tile[tile_idx[0]].u_off;
1326 		tile_image.v_offset = image->tile[tile_idx[0]].v_off;
1327 	}
1328 
1329 	ipu_cpmem_set_image(channel, &tile_image);
1330 
1331 	if (rot_mode)
1332 		ipu_cpmem_set_rotation(channel, rot_mode);
1333 
1334 	/*
1335 	 * Skip writing U and V components to odd rows in the output
1336 	 * channels for planar 4:2:0.
1337 	 */
1338 	if ((channel == chan->out_chan ||
1339 	     channel == chan->rotation_out_chan) &&
1340 	    image->fmt->planar && image->fmt->uv_height_dec == 2)
1341 		ipu_cpmem_skip_odd_chroma_rows(channel);
1342 
1343 	if (channel == chan->rotation_in_chan ||
1344 	    channel == chan->rotation_out_chan) {
1345 		burst_size = 8;
1346 		ipu_cpmem_set_block_mode(channel);
1347 	} else
1348 		burst_size = (width % 16) ? 8 : 16;
1349 
1350 	ipu_cpmem_set_burstsize(channel, burst_size);
1351 
1352 	ipu_ic_task_idma_init(chan->ic, channel, width, height,
1353 			      burst_size, rot_mode);
1354 
1355 	/*
1356 	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
1357 	 * only do this when there is no PRG present.
1358 	 */
1359 	if (!channel->ipu->prg_priv)
1360 		ipu_cpmem_set_axi_id(channel, 1);
1361 
1362 	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
1363 }
1364 
1365 static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
1366 {
1367 	struct ipu_image_convert_ctx *ctx = run->ctx;
1368 	struct ipu_image_convert_chan *chan = ctx->chan;
1369 	struct ipu_image_convert_priv *priv = chan->priv;
1370 	struct ipu_image_convert_image *s_image = &ctx->in;
1371 	struct ipu_image_convert_image *d_image = &ctx->out;
1372 	unsigned int dst_tile = ctx->out_tile_map[tile];
1373 	unsigned int dest_width, dest_height;
1374 	unsigned int col, row;
1375 	u32 rsc;
1376 	int ret;
1377 
1378 	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
1379 		__func__, chan->ic_task, ctx, run, tile, dst_tile);
1380 
1381 	/* clear EOF irq mask */
1382 	ctx->eof_mask = 0;
1383 
1384 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1385 		/* swap width/height for resizer */
1386 		dest_width = d_image->tile[dst_tile].height;
1387 		dest_height = d_image->tile[dst_tile].width;
1388 	} else {
1389 		dest_width = d_image->tile[dst_tile].width;
1390 		dest_height = d_image->tile[dst_tile].height;
1391 	}
1392 
1393 	row = tile / s_image->num_cols;
1394 	col = tile % s_image->num_cols;
1395 
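	/*
	 * Pack this tile's vertical / horizontal downsizing and resizing
	 * coefficients into the resizing coefficient register value.
	 */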
1396 	rsc =  (ctx->downsize_coeff_v << 30) |
1397 	       (ctx->resize_coeffs_v[row] << 16) |
1398 	       (ctx->downsize_coeff_h << 14) |
1399 	       (ctx->resize_coeffs_h[col]);
1400 
1401 	dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
1402 		__func__, s_image->tile[tile].width,
1403 		s_image->tile[tile].height, dest_width, dest_height, rsc);
1404 
1405 	/* setup the IC resizer and CSC */
1406 	ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
1407 				   s_image->tile[tile].width,
1408 				   s_image->tile[tile].height,
1409 				   dest_width,
1410 				   dest_height,
1411 				   rsc);
1412 	if (ret) {
1413 		dev_err(priv->ipu->dev, "ipu_ic_task_init_rsc failed, %d\n", ret);
1414 		return ret;
1415 	}
1416 
1417 	/* init the source MEM-->IC PP IDMAC channel */
1418 	init_idmac_channel(ctx, chan->in_chan, s_image,
1419 			   IPU_ROTATE_NONE, false, tile);
1420 
1421 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1422 		/* init the IC PP-->MEM IDMAC channel */
1423 		init_idmac_channel(ctx, chan->out_chan, d_image,
1424 				   IPU_ROTATE_NONE, true, tile);
1425 
1426 		/* init the MEM-->IC PP ROT IDMAC channel */
1427 		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
1428 				   ctx->rot_mode, true, tile);
1429 
1430 		/* init the destination IC PP ROT-->MEM IDMAC channel */
1431 		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
1432 				   IPU_ROTATE_NONE, false, tile);
1433 
1434 		/* now link IC PP-->MEM to MEM-->IC PP ROT */
1435 		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
1436 	} else {
1437 		/* init the destination IC PP-->MEM IDMAC channel */
1438 		init_idmac_channel(ctx, chan->out_chan, d_image,
1439 				   ctx->rot_mode, false, tile);
1440 	}
1441 
1442 	/* enable the IC */
1443 	ipu_ic_enable(chan->ic);
1444 
1445 	/* set buffers ready */
1446 	ipu_idmac_select_buffer(chan->in_chan, 0);
1447 	ipu_idmac_select_buffer(chan->out_chan, 0);
1448 	if (ipu_rot_mode_is_irt(ctx->rot_mode))
1449 		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
1450 	if (ctx->double_buffering) {
1451 		ipu_idmac_select_buffer(chan->in_chan, 1);
1452 		ipu_idmac_select_buffer(chan->out_chan, 1);
1453 		if (ipu_rot_mode_is_irt(ctx->rot_mode))
1454 			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
1455 	}
1456 
1457 	/* enable the channels! */
1458 	ipu_idmac_enable_channel(chan->in_chan);
1459 	ipu_idmac_enable_channel(chan->out_chan);
1460 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1461 		ipu_idmac_enable_channel(chan->rotation_in_chan);
1462 		ipu_idmac_enable_channel(chan->rotation_out_chan);
1463 	}
1464 
1465 	ipu_ic_task_enable(chan->ic);
1466 
1467 	ipu_cpmem_dump(chan->in_chan);
1468 	ipu_cpmem_dump(chan->out_chan);
1469 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1470 		ipu_cpmem_dump(chan->rotation_in_chan);
1471 		ipu_cpmem_dump(chan->rotation_out_chan);
1472 	}
1473 
1474 	ipu_dump(priv->ipu);
1475 
1476 	return 0;
1477 }
1478 
1479 /* hold irqlock when calling */
1480 static int do_run(struct ipu_image_convert_run *run)
1481 {
1482 	struct ipu_image_convert_ctx *ctx = run->ctx;
1483 	struct ipu_image_convert_chan *chan = ctx->chan;
1484 
1485 	lockdep_assert_held(&chan->irqlock);
1486 
1487 	ctx->in.base.phys0 = run->in_phys;
1488 	ctx->out.base.phys0 = run->out_phys;
1489 
1490 	ctx->cur_buf_num = 0;
1491 	ctx->next_tile = 1;
1492 
1493 	/* remove run from pending_q and set as current */
1494 	list_del(&run->list);
1495 	chan->current_run = run;
1496 
1497 	return convert_start(run, 0);
1498 }
1499 
1500 /* hold irqlock when calling */
1501 static void run_next(struct ipu_image_convert_chan *chan)
1502 {
1503 	struct ipu_image_convert_priv *priv = chan->priv;
1504 	struct ipu_image_convert_run *run, *tmp;
1505 	int ret;
1506 
1507 	lockdep_assert_held(&chan->irqlock);
1508 
1509 	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
1510 		/* skip contexts that are aborting */
1511 		if (run->ctx->aborting) {
1512 			dev_dbg(priv->ipu->dev,
1513 				"%s: task %u: skipping aborting ctx %p run %p\n",
1514 				__func__, chan->ic_task, run->ctx, run);
1515 			continue;
1516 		}
1517 
1518 		ret = do_run(run);
1519 		if (!ret)
1520 			break;
1521 
1522 		/*
1523 		 * something went wrong with start, add the run
1524 		 * to done q and continue to the next run in the
1525 		 * pending q.
1526 		 */
1527 		run->status = ret;
1528 		list_add_tail(&run->list, &chan->done_q);
1529 		chan->current_run = NULL;
1530 	}
1531 }
1532 
1533 static void empty_done_q(struct ipu_image_convert_chan *chan)
1534 {
1535 	struct ipu_image_convert_priv *priv = chan->priv;
1536 	struct ipu_image_convert_run *run;
1537 	unsigned long flags;
1538 
1539 	spin_lock_irqsave(&chan->irqlock, flags);
1540 
1541 	while (!list_empty(&chan->done_q)) {
1542 		run = list_entry(chan->done_q.next,
1543 				 struct ipu_image_convert_run,
1544 				 list);
1545 
1546 		list_del(&run->list);
1547 
1548 		dev_dbg(priv->ipu->dev,
1549 			"%s: task %u: completing ctx %p run %p with %d\n",
1550 			__func__, chan->ic_task, run->ctx, run, run->status);
1551 
1552 		/* call the completion callback and free the run */
1553 		spin_unlock_irqrestore(&chan->irqlock, flags);
1554 		run->ctx->complete(run, run->ctx->complete_context);
1555 		spin_lock_irqsave(&chan->irqlock, flags);
1556 	}
1557 
1558 	spin_unlock_irqrestore(&chan->irqlock, flags);
1559 }
1560 
1561 /*
1562  * the bottom half thread clears out the done_q, calling the
1563  * completion handler for each.
1564  */
1565 static irqreturn_t do_bh(int irq, void *dev_id)
1566 {
1567 	struct ipu_image_convert_chan *chan = dev_id;
1568 	struct ipu_image_convert_priv *priv = chan->priv;
1569 	struct ipu_image_convert_ctx *ctx;
1570 	unsigned long flags;
1571 
1572 	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
1573 		chan->ic_task);
1574 
1575 	empty_done_q(chan);
1576 
1577 	spin_lock_irqsave(&chan->irqlock, flags);
1578 
1579 	/*
1580 	 * now that the done_q is cleared out, signal any aborting
1581 	 * contexts that their abort can complete.
1582 	 */
1583 	list_for_each_entry(ctx, &chan->ctx_list, list) {
1584 		if (ctx->aborting) {
1585 			dev_dbg(priv->ipu->dev,
1586 				"%s: task %u: signaling abort for ctx %p\n",
1587 				__func__, chan->ic_task, ctx);
1588 			complete_all(&ctx->aborted);
1589 		}
1590 	}
1591 
1592 	spin_unlock_irqrestore(&chan->irqlock, flags);
1593 
1594 	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
1595 		chan->ic_task);
1596 
1597 	return IRQ_HANDLED;
1598 }
1599 
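/*
 * Check whether the IC resizer must be reprogrammed (stopped and
 * restarted) before the next tile, i.e. whether the next tile's resize
 * coefficients or tile dimensions differ from the current tile's.
 */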
1600 static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
1601 {
1602 	unsigned int cur_tile = ctx->next_tile - 1;
1603 	unsigned int next_tile = ctx->next_tile;
1604 
1605 	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
1606 	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
1607 	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
1608 	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
1609 	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
1610 	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
1611 	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
1612 	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
1613 		return true;
1614 
1615 	return false;
1616 }
1617 
1618 /* hold irqlock when calling */
1619 static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
1620 {
1621 	struct ipu_image_convert_ctx *ctx = run->ctx;
1622 	struct ipu_image_convert_chan *chan = ctx->chan;
1623 	struct ipu_image_tile *src_tile, *dst_tile;
1624 	struct ipu_image_convert_image *s_image = &ctx->in;
1625 	struct ipu_image_convert_image *d_image = &ctx->out;
1626 	struct ipuv3_channel *outch;
1627 	unsigned int dst_idx;
1628 
1629 	lockdep_assert_held(&chan->irqlock);
1630 
1631 	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
1632 		chan->rotation_out_chan : chan->out_chan;
1633 
1634 	/*
1635 	 * It is difficult to stop the channel DMA before the channels
1636 	 * enter the paused state. Without double-buffering the channels
1637 	 * are always in a paused state when the EOF irq occurs, so it
1638 	 * is safe to stop the channels now. For double-buffering we
1639 	 * just ignore the abort until the operation completes, when it
1640 	 * is safe to shut down.
1641 	 */
1642 	if (ctx->aborting && !ctx->double_buffering) {
1643 		convert_stop(run);
1644 		run->status = -EIO;
1645 		goto done;
1646 	}
1647 
1648 	if (ctx->next_tile == ctx->num_tiles) {
1649 		/*
1650 		 * the conversion is complete
1651 		 */
1652 		convert_stop(run);
1653 		run->status = 0;
1654 		goto done;
1655 	}
1656 
1657 	/*
1658 	 * not done, place the next tile buffers.
1659 	 */
1660 	if (!ctx->double_buffering) {
1661 		if (ic_settings_changed(ctx)) {
1662 			convert_stop(run);
1663 			convert_start(run, ctx->next_tile);
1664 		} else {
1665 			src_tile = &s_image->tile[ctx->next_tile];
1666 			dst_idx = ctx->out_tile_map[ctx->next_tile];
1667 			dst_tile = &d_image->tile[dst_idx];
1668 
1669 			ipu_cpmem_set_buffer(chan->in_chan, 0,
1670 					     s_image->base.phys0 +
1671 					     src_tile->offset);
1672 			ipu_cpmem_set_buffer(outch, 0,
1673 					     d_image->base.phys0 +
1674 					     dst_tile->offset);
1675 			if (s_image->fmt->planar)
1676 				ipu_cpmem_set_uv_offset(chan->in_chan,
1677 							src_tile->u_off,
1678 							src_tile->v_off);
1679 			if (d_image->fmt->planar)
1680 				ipu_cpmem_set_uv_offset(outch,
1681 							dst_tile->u_off,
1682 							dst_tile->v_off);
1683 
1684 			ipu_idmac_select_buffer(chan->in_chan, 0);
1685 			ipu_idmac_select_buffer(outch, 0);
1686 		}
1687 	} else if (ctx->next_tile < ctx->num_tiles - 1) {
1688 
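		/*
		 * With double-buffering, the hardware is already converting
		 * ctx->next_tile from the other buffer, so queue the tile
		 * after that one into the buffer that just completed.
		 */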
1689 		src_tile = &s_image->tile[ctx->next_tile + 1];
1690 		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
1691 		dst_tile = &d_image->tile[dst_idx];
1692 
1693 		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
1694 				     s_image->base.phys0 + src_tile->offset);
1695 		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
1696 				     d_image->base.phys0 + dst_tile->offset);
1697 
1698 		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
1699 		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
1700 
1701 		ctx->cur_buf_num ^= 1;
1702 	}
1703 
1704 	ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
1705 	ctx->next_tile++;
1706 	return IRQ_HANDLED;
1707 done:
1708 	list_add_tail(&run->list, &chan->done_q);
1709 	chan->current_run = NULL;
1710 	run_next(chan);
1711 	return IRQ_WAKE_THREAD;
1712 }
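
/*
 * Sketch of the double-buffered branch above, with a four-tile,
 * non-rotating run assumed for illustration: tile 0 starts from IDMAC
 * buffer 0 with tile 1 already placed in buffer 1. When tile 0's EOF
 * arrives the hardware has switched to buffer 1 for tile 1, so the
 * now-idle buffer (cur_buf_num) is reloaded with tile 2 and cur_buf_num
 * is toggled. The same ping-pong repeats on each EOF; once only the
 * last tile remains there is nothing left to place.
 */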
1713 
1714 static irqreturn_t eof_irq(int irq, void *data)
1715 {
1716 	struct ipu_image_convert_chan *chan = data;
1717 	struct ipu_image_convert_priv *priv = chan->priv;
1718 	struct ipu_image_convert_ctx *ctx;
1719 	struct ipu_image_convert_run *run;
1720 	irqreturn_t ret = IRQ_HANDLED;
1721 	bool tile_complete = false;
1722 	unsigned long flags;
1723 
1724 	spin_lock_irqsave(&chan->irqlock, flags);
1725 
1726 	/* get current run and its context */
1727 	run = chan->current_run;
1728 	if (!run) {
1729 		ret = IRQ_NONE;
1730 		goto out;
1731 	}
1732 
1733 	ctx = run->ctx;
1734 
1735 	if (irq == chan->in_eof_irq) {
1736 		ctx->eof_mask |= EOF_IRQ_IN;
1737 	} else if (irq == chan->out_eof_irq) {
1738 		ctx->eof_mask |= EOF_IRQ_OUT;
1739 	} else if (irq == chan->rot_in_eof_irq ||
1740 		   irq == chan->rot_out_eof_irq) {
1741 		if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1742 			/* this was NOT a rotation op, shouldn't happen */
1743 			dev_err(priv->ipu->dev,
1744 				"Unexpected rotation interrupt\n");
1745 			goto out;
1746 		}
1747 		ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
1748 			EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
1749 	} else {
1750 		dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
1751 		ret = IRQ_NONE;
1752 		goto out;
1753 	}
1754 
1755 	if (ipu_rot_mode_is_irt(ctx->rot_mode))
1756 		tile_complete =	(ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
1757 	else
1758 		tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
1759 
1760 	if (tile_complete)
1761 		ret = do_tile_complete(run);
1762 out:
1763 	spin_unlock_irqrestore(&chan->irqlock, flags);
1764 	return ret;
1765 }
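
/*
 * Example of the EOF bookkeeping above, assuming a rotating conversion:
 * each tile raises one EOF interrupt per IDMAC channel involved (in,
 * out, rot_in, rot_out). The earlier interrupts only set their bit in
 * ctx->eof_mask; whichever interrupt arrives last makes the mask equal
 * EOF_IRQ_ROT_COMPLETE, and only then is do_tile_complete() called to
 * move on to the next tile (or finish the run).
 */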
1766 
1767 /*
1768  * Try to force the completion of runs for this ctx. Called when the
1769  * abort wait times out in __ipu_image_convert_abort().
1770  */
1771 static void force_abort(struct ipu_image_convert_ctx *ctx)
1772 {
1773 	struct ipu_image_convert_chan *chan = ctx->chan;
1774 	struct ipu_image_convert_run *run;
1775 	unsigned long flags;
1776 
1777 	spin_lock_irqsave(&chan->irqlock, flags);
1778 
1779 	run = chan->current_run;
1780 	if (run && run->ctx == ctx) {
1781 		convert_stop(run);
1782 		run->status = -EIO;
1783 		list_add_tail(&run->list, &chan->done_q);
1784 		chan->current_run = NULL;
1785 		run_next(chan);
1786 	}
1787 
1788 	spin_unlock_irqrestore(&chan->irqlock, flags);
1789 
1790 	empty_done_q(chan);
1791 }
1792 
1793 static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1794 {
1795 	if (chan->in_eof_irq >= 0)
1796 		free_irq(chan->in_eof_irq, chan);
1797 	if (chan->rot_in_eof_irq >= 0)
1798 		free_irq(chan->rot_in_eof_irq, chan);
1799 	if (chan->out_eof_irq >= 0)
1800 		free_irq(chan->out_eof_irq, chan);
1801 	if (chan->rot_out_eof_irq >= 0)
1802 		free_irq(chan->rot_out_eof_irq, chan);
1803 
1804 	if (!IS_ERR_OR_NULL(chan->in_chan))
1805 		ipu_idmac_put(chan->in_chan);
1806 	if (!IS_ERR_OR_NULL(chan->out_chan))
1807 		ipu_idmac_put(chan->out_chan);
1808 	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
1809 		ipu_idmac_put(chan->rotation_in_chan);
1810 	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
1811 		ipu_idmac_put(chan->rotation_out_chan);
1812 	if (!IS_ERR_OR_NULL(chan->ic))
1813 		ipu_ic_put(chan->ic);
1814 
1815 	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1816 		chan->rotation_out_chan = NULL;
1817 	chan->in_eof_irq = -1;
1818 	chan->rot_in_eof_irq = -1;
1819 	chan->out_eof_irq = -1;
1820 	chan->rot_out_eof_irq = -1;
1821 }
1822 
1823 static int get_eof_irq(struct ipu_image_convert_chan *chan,
1824 		       struct ipuv3_channel *channel)
1825 {
1826 	struct ipu_image_convert_priv *priv = chan->priv;
1827 	int ret, irq;
1828 
1829 	irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
1830 
1831 	ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
1832 	if (ret < 0) {
1833 		dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
1834 		return ret;
1835 	}
1836 
1837 	return irq;
1838 }
1839 
1840 static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1841 {
1842 	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
1843 	struct ipu_image_convert_priv *priv = chan->priv;
1844 	int ret;
1845 
1846 	/* get IC */
1847 	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
1848 	if (IS_ERR(chan->ic)) {
1849 		dev_err(priv->ipu->dev, "could not acquire IC\n");
1850 		ret = PTR_ERR(chan->ic);
1851 		goto err;
1852 	}
1853 
1854 	/* get IDMAC channels */
1855 	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
1856 	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
1857 	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
1858 		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
1859 		ret = -EBUSY;
1860 		goto err;
1861 	}
1862 
1863 	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
1864 	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
1865 	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
1866 		dev_err(priv->ipu->dev,
1867 			"could not acquire idmac rotation channels\n");
1868 		ret = -EBUSY;
1869 		goto err;
1870 	}
1871 
1872 	/* acquire the EOF interrupts */
1873 	ret = get_eof_irq(chan, chan->in_chan);
1874 	if (ret < 0) {
1875 		chan->in_eof_irq = -1;
1876 		goto err;
1877 	}
1878 	chan->in_eof_irq = ret;
1879 
1880 	ret = get_eof_irq(chan, chan->rotation_in_chan);
1881 	if (ret < 0) {
1882 		chan->rot_in_eof_irq = -1;
1883 		goto err;
1884 	}
1885 	chan->rot_in_eof_irq = ret;
1886 
1887 	ret = get_eof_irq(chan, chan->out_chan);
1888 	if (ret < 0) {
1889 		chan->out_eof_irq = -1;
1890 		goto err;
1891 	}
1892 	chan->out_eof_irq = ret;
1893 
1894 	ret = get_eof_irq(chan, chan->rotation_out_chan);
1895 	if (ret < 0) {
1896 		chan->rot_out_eof_irq = -1;
1897 		goto err;
1898 	}
1899 	chan->rot_out_eof_irq = ret;
1900 
1901 	return 0;
1902 err:
1903 	release_ipu_resources(chan);
1904 	return ret;
1905 }
1906 
1907 static int fill_image(struct ipu_image_convert_ctx *ctx,
1908 		      struct ipu_image_convert_image *ic_image,
1909 		      struct ipu_image *image,
1910 		      enum ipu_image_convert_type type)
1911 {
1912 	struct ipu_image_convert_priv *priv = ctx->chan->priv;
1913 
1914 	ic_image->base = *image;
1915 	ic_image->type = type;
1916 
1917 	ic_image->fmt = get_format(image->pix.pixelformat);
1918 	if (!ic_image->fmt) {
1919 		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
1920 			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
1921 		return -EINVAL;
1922 	}
1923 
1924 	if (ic_image->fmt->planar)
1925 		ic_image->stride = ic_image->base.pix.width;
1926 	else
1927 		ic_image->stride  = ic_image->base.pix.bytesperline;
1928 
1929 	return 0;
1930 }
1931 
1932 /* borrowed from drivers/media/v4l2-core/v4l2-common.c */
1933 static unsigned int clamp_align(unsigned int x, unsigned int min,
1934 				unsigned int max, unsigned int align)
1935 {
1936 	/* Bits that must be zero to be aligned */
1937 	unsigned int mask = ~((1 << align) - 1);
1938 
1939 	/* Clamp to aligned min and max */
1940 	x = clamp(x, (min + ~mask) & mask, max & mask);
1941 
1942 	/* Round to nearest aligned value */
1943 	if (align)
1944 		x = (x + (1 << (align - 1))) & mask;
1945 
1946 	return x;
1947 }
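
/*
 * Worked example (values chosen for illustration): clamp_align(97, 16,
 * 4096, 3) aligns to multiples of 1 << 3 = 8. The mask is ~7, the clamp
 * range stays [16, 4096] since both bounds are already 8-aligned, and
 * 97 is rounded to the nearest multiple of 8 by adding half of the
 * alignment before masking: (97 + 4) & ~7 = 96.
 */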
1948 
1949 /* Adjusts input/output images to IPU restrictions */
1950 void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
1951 			      enum ipu_rotate_mode rot_mode)
1952 {
1953 	const struct ipu_image_pixfmt *infmt, *outfmt;
1954 	u32 w_align_out, h_align_out;
1955 	u32 w_align_in, h_align_in;
1956 
1957 	infmt = get_format(in->pix.pixelformat);
1958 	outfmt = get_format(out->pix.pixelformat);
1959 
1960 	/* set some default pixel formats if needed */
1961 	if (!infmt) {
1962 		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1963 		infmt = get_format(V4L2_PIX_FMT_RGB24);
1964 	}
1965 	if (!outfmt) {
1966 		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
1967 		outfmt = get_format(V4L2_PIX_FMT_RGB24);
1968 	}
1969 
1970 	/* image converter does not handle fields */
1971 	in->pix.field = out->pix.field = V4L2_FIELD_NONE;
1972 
1973 	/* resizer cannot downsize more than 4:1 */
1974 	if (ipu_rot_mode_is_irt(rot_mode)) {
1975 		out->pix.height = max_t(__u32, out->pix.height,
1976 					in->pix.width / 4);
1977 		out->pix.width = max_t(__u32, out->pix.width,
1978 				       in->pix.height / 4);
1979 	} else {
1980 		out->pix.width = max_t(__u32, out->pix.width,
1981 				       in->pix.width / 4);
1982 		out->pix.height = max_t(__u32, out->pix.height,
1983 					in->pix.height / 4);
1984 	}
1985 
1986 	/* align input width/height */
1987 	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
1988 					    rot_mode));
1989 	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
1990 					     rot_mode));
1991 	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
1992 				    w_align_in);
1993 	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
1994 				     h_align_in);
1995 
1996 	/* align output width/height */
1997 	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
1998 					     rot_mode));
1999 	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
2000 					      rot_mode));
2001 	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
2002 				     w_align_out);
2003 	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
2004 				      h_align_out);
2005 
2006 	/* set input/output strides and image sizes */
2007 	in->pix.bytesperline = infmt->planar ?
2008 		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
2009 			    w_align_in) :
2010 		clamp_align((in->pix.width * infmt->bpp) >> 3,
2011 			    ((2 << w_align_in) * infmt->bpp) >> 3,
2012 			    (MAX_W * infmt->bpp) >> 3,
2013 			    w_align_in);
2014 	in->pix.sizeimage = infmt->planar ?
2015 		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
2016 		in->pix.height * in->pix.bytesperline;
2017 	out->pix.bytesperline = outfmt->planar ? out->pix.width :
2018 		(out->pix.width * outfmt->bpp) >> 3;
2019 	out->pix.sizeimage = outfmt->planar ?
2020 		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
2021 		out->pix.height * out->pix.bytesperline;
2022 }
2023 EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
2024 
2025 /*
2026  * This is used by ipu_image_convert_prepare() to verify that the given
2027  * input and output images are valid before starting the conversion. Clients
2028  * can also call it before calling ipu_image_convert_prepare().
2029  */
2030 int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
2031 			     enum ipu_rotate_mode rot_mode)
2032 {
2033 	struct ipu_image testin, testout;
2034 
2035 	testin = *in;
2036 	testout = *out;
2037 
2038 	ipu_image_convert_adjust(&testin, &testout, rot_mode);
2039 
2040 	if (testin.pix.width != in->pix.width ||
2041 	    testin.pix.height != in->pix.height ||
2042 	    testout.pix.width != out->pix.width ||
2043 	    testout.pix.height != out->pix.height)
2044 		return -EINVAL;
2045 
2046 	return 0;
2047 }
2048 EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
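
/*
 * Minimal usage sketch for ipu_image_convert_adjust() and
 * ipu_image_convert_verify(), assuming a hypothetical caller that needs
 * to know the geometry the hardware will actually produce before
 * allocating buffers (struct setup and error handling omitted):
 *
 *	struct ipu_image in = { ... }, out = { ... };
 *
 *	ipu_image_convert_adjust(&in, &out, IPU_ROTATE_NONE);
 *	ret = ipu_image_convert_verify(&in, &out, IPU_ROTATE_NONE);
 *
 * After adjust() the pix fields hold IC-aligned width/height/stride
 * values, so verify() on the unmodified pair is expected to succeed;
 * it returns -EINVAL if the caller later requests dimensions that
 * adjust() would have to change.
 */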
2049 
2050 /*
2051  * Call ipu_image_convert_prepare() to prepare for the conversion of
2052  * the given images and rotation mode. Returns a new conversion context.
2053  */
2054 struct ipu_image_convert_ctx *
2055 ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2056 			  struct ipu_image *in, struct ipu_image *out,
2057 			  enum ipu_rotate_mode rot_mode,
2058 			  ipu_image_convert_cb_t complete,
2059 			  void *complete_context)
2060 {
2061 	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
2062 	struct ipu_image_convert_image *s_image, *d_image;
2063 	struct ipu_image_convert_chan *chan;
2064 	struct ipu_image_convert_ctx *ctx;
2065 	unsigned long flags;
2066 	unsigned int i;
2067 	bool get_res;
2068 	int ret;
2069 
2070 	if (!in || !out || !complete ||
2071 	    (ic_task != IC_TASK_VIEWFINDER &&
2072 	     ic_task != IC_TASK_POST_PROCESSOR))
2073 		return ERR_PTR(-EINVAL);
2074 
2075 	/* verify the in/out images before continuing */
2076 	ret = ipu_image_convert_verify(in, out, rot_mode);
2077 	if (ret) {
2078 		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
2079 			__func__);
2080 		return ERR_PTR(ret);
2081 	}
2082 
2083 	chan = &priv->chan[ic_task];
2084 
2085 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2086 	if (!ctx)
2087 		return ERR_PTR(-ENOMEM);
2088 
2089 	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
2090 		chan->ic_task, ctx);
2091 
2092 	ctx->chan = chan;
2093 	init_completion(&ctx->aborted);
2094 
2095 	ctx->rot_mode = rot_mode;
2096 
2097 	/* Sets ctx->in.num_rows/cols as well */
2098 	ret = calc_image_resize_coefficients(ctx, in, out);
2099 	if (ret)
2100 		goto out_free;
2101 
2102 	s_image = &ctx->in;
2103 	d_image = &ctx->out;
2104 
2105 	/* set tiling and rotation */
2106 	if (ipu_rot_mode_is_irt(rot_mode)) {
2107 		d_image->num_rows = s_image->num_cols;
2108 		d_image->num_cols = s_image->num_rows;
2109 	} else {
2110 		d_image->num_rows = s_image->num_rows;
2111 		d_image->num_cols = s_image->num_cols;
2112 	}
2113 
2114 	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
2115 
2116 	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
2117 	if (ret)
2118 		goto out_free;
2119 	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
2120 	if (ret)
2121 		goto out_free;
2122 
2123 	calc_out_tile_map(ctx);
2124 
2125 	find_seams(ctx, s_image, d_image);
2126 
2127 	ret = calc_tile_dimensions(ctx, s_image);
2128 	if (ret)
2129 		goto out_free;
2130 
2131 	ret = calc_tile_offsets(ctx, s_image);
2132 	if (ret)
2133 		goto out_free;
2134 
2135 	calc_tile_dimensions(ctx, d_image);
2136 	ret = calc_tile_offsets(ctx, d_image);
2137 	if (ret)
2138 		goto out_free;
2139 
2140 	calc_tile_resize_coefficients(ctx);
2141 
2142 	ret = ipu_ic_calc_csc(&ctx->csc,
2143 			s_image->base.pix.ycbcr_enc,
2144 			s_image->base.pix.quantization,
2145 			ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
2146 			d_image->base.pix.ycbcr_enc,
2147 			d_image->base.pix.quantization,
2148 			ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
2149 	if (ret)
2150 		goto out_free;
2151 
2152 	dump_format(ctx, s_image);
2153 	dump_format(ctx, d_image);
2154 
2155 	ctx->complete = complete;
2156 	ctx->complete_context = complete_context;
2157 
2158 	/*
2159 	 * Can we use double-buffering for this operation? If there is
2160 	 * only one tile (the whole image can be converted in a single
2161 	 * operation) there's no point in using double-buffering. Also,
2162 	 * the IPU's IDMAC channels allow only a single U and V plane
2163 	 * offset shared between both buffers, but these offsets change
2164 	 * for every tile, and would therefore have to be updated for each
2165 	 * buffer, which is not possible. So double-buffering is impossible
2166 	 * when either the source or destination image is in
2167 	 * a planar format (YUV420, YUV422P, etc.). Further, differently
2168 	 * sized tiles or different resizing coefficients per tile
2169 	 * prevent double-buffering as well.
2170 	 */
2171 	ctx->double_buffering = (ctx->num_tiles > 1 &&
2172 				 !s_image->fmt->planar &&
2173 				 !d_image->fmt->planar);
2174 	for (i = 1; i < ctx->num_tiles; i++) {
2175 		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
2176 		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
2177 		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
2178 		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
2179 			ctx->double_buffering = false;
2180 			break;
2181 		}
2182 	}
2183 	for (i = 1; i < ctx->in.num_cols; i++) {
2184 		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
2185 			ctx->double_buffering = false;
2186 			break;
2187 		}
2188 	}
2189 	for (i = 1; i < ctx->in.num_rows; i++) {
2190 		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
2191 			ctx->double_buffering = false;
2192 			break;
2193 		}
2194 	}
2195 
2196 	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
2197 		unsigned long intermediate_size = d_image->tile[0].size;
2198 
2199 		for (i = 1; i < ctx->num_tiles; i++) {
2200 			if (d_image->tile[i].size > intermediate_size)
2201 				intermediate_size = d_image->tile[i].size;
2202 		}
2203 
2204 		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
2205 				    intermediate_size);
2206 		if (ret)
2207 			goto out_free;
2208 		if (ctx->double_buffering) {
2209 			ret = alloc_dma_buf(priv,
2210 					    &ctx->rot_intermediate[1],
2211 					    intermediate_size);
2212 			if (ret)
2213 				goto out_free_dmabuf0;
2214 		}
2215 	}
2216 
2217 	spin_lock_irqsave(&chan->irqlock, flags);
2218 
2219 	get_res = list_empty(&chan->ctx_list);
2220 
2221 	list_add_tail(&ctx->list, &chan->ctx_list);
2222 
2223 	spin_unlock_irqrestore(&chan->irqlock, flags);
2224 
2225 	if (get_res) {
2226 		ret = get_ipu_resources(chan);
2227 		if (ret)
2228 			goto out_free_dmabuf1;
2229 	}
2230 
2231 	return ctx;
2232 
2233 out_free_dmabuf1:
2234 	free_dma_buf(priv, &ctx->rot_intermediate[1]);
2235 	spin_lock_irqsave(&chan->irqlock, flags);
2236 	list_del(&ctx->list);
2237 	spin_unlock_irqrestore(&chan->irqlock, flags);
2238 out_free_dmabuf0:
2239 	free_dma_buf(priv, &ctx->rot_intermediate[0]);
2240 out_free:
2241 	kfree(ctx);
2242 	return ERR_PTR(ret);
2243 }
2244 EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
2245 
2246 /*
2247  * Carry out a single image conversion run. Only the physical addresses of
2248  * the input and output image buffers are needed. The conversion context
2249  * must have been created previously with ipu_image_convert_prepare().
2250  */
2251 int ipu_image_convert_queue(struct ipu_image_convert_run *run)
2252 {
2253 	struct ipu_image_convert_chan *chan;
2254 	struct ipu_image_convert_priv *priv;
2255 	struct ipu_image_convert_ctx *ctx;
2256 	unsigned long flags;
2257 	int ret = 0;
2258 
2259 	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
2260 		return -EINVAL;
2261 
2262 	ctx = run->ctx;
2263 	chan = ctx->chan;
2264 	priv = chan->priv;
2265 
2266 	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
2267 		chan->ic_task, ctx, run);
2268 
2269 	INIT_LIST_HEAD(&run->list);
2270 
2271 	spin_lock_irqsave(&chan->irqlock, flags);
2272 
2273 	if (ctx->aborting) {
2274 		ret = -EIO;
2275 		goto unlock;
2276 	}
2277 
2278 	list_add_tail(&run->list, &chan->pending_q);
2279 
2280 	if (!chan->current_run) {
2281 		ret = do_run(run);
2282 		if (ret)
2283 			chan->current_run = NULL;
2284 	}
2285 unlock:
2286 	spin_unlock_irqrestore(&chan->irqlock, flags);
2287 	return ret;
2288 }
2289 EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
2290 
2291 /* Abort any active or pending conversions for this context */
2292 static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2293 {
2294 	struct ipu_image_convert_chan *chan = ctx->chan;
2295 	struct ipu_image_convert_priv *priv = chan->priv;
2296 	struct ipu_image_convert_run *run, *active_run, *tmp;
2297 	unsigned long flags;
2298 	int run_count, ret;
2299 
2300 	spin_lock_irqsave(&chan->irqlock, flags);
2301 
2302 	/* move all remaining pending runs in this context to done_q */
2303 	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
2304 		if (run->ctx != ctx)
2305 			continue;
2306 		run->status = -EIO;
2307 		list_move_tail(&run->list, &chan->done_q);
2308 	}
2309 
2310 	run_count = get_run_count(ctx, &chan->done_q);
2311 	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
2312 		chan->current_run : NULL;
2313 
2314 	if (active_run)
2315 		reinit_completion(&ctx->aborted);
2316 
2317 	ctx->aborting = true;
2318 
2319 	spin_unlock_irqrestore(&chan->irqlock, flags);
2320 
2321 	if (!run_count && !active_run) {
2322 		dev_dbg(priv->ipu->dev,
2323 			"%s: task %u: no abort needed for ctx %p\n",
2324 			__func__, chan->ic_task, ctx);
2325 		return;
2326 	}
2327 
2328 	if (!active_run) {
2329 		empty_done_q(chan);
2330 		return;
2331 	}
2332 
2333 	dev_dbg(priv->ipu->dev,
2334 		"%s: task %u: wait for completion: %d runs\n",
2335 		__func__, chan->ic_task, run_count);
2336 
2337 	ret = wait_for_completion_timeout(&ctx->aborted,
2338 					  msecs_to_jiffies(10000));
2339 	if (ret == 0) {
2340 		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
2341 		force_abort(ctx);
2342 	}
2343 }
2344 
2345 void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
2346 {
2347 	__ipu_image_convert_abort(ctx);
2348 	ctx->aborting = false;
2349 }
2350 EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
2351 
2352 /* Unprepare image conversion context */
2353 void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
2354 {
2355 	struct ipu_image_convert_chan *chan = ctx->chan;
2356 	struct ipu_image_convert_priv *priv = chan->priv;
2357 	unsigned long flags;
2358 	bool put_res;
2359 
2360 	/* make sure no runs are hanging around */
2361 	__ipu_image_convert_abort(ctx);
2362 
2363 	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
2364 		chan->ic_task, ctx);
2365 
2366 	spin_lock_irqsave(&chan->irqlock, flags);
2367 
2368 	list_del(&ctx->list);
2369 
2370 	put_res = list_empty(&chan->ctx_list);
2371 
2372 	spin_unlock_irqrestore(&chan->irqlock, flags);
2373 
2374 	if (put_res)
2375 		release_ipu_resources(chan);
2376 
2377 	free_dma_buf(priv, &ctx->rot_intermediate[1]);
2378 	free_dma_buf(priv, &ctx->rot_intermediate[0]);
2379 
2380 	kfree(ctx);
2381 }
2382 EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
2383 
2384 /*
2385  * "Canned" asynchronous single image conversion. Allocates and returns
2386  * a new conversion run.  On successful return the caller must free the
2387  * run and call ipu_image_convert_unprepare() after conversion completes.
2388  */
2389 struct ipu_image_convert_run *
2390 ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
2391 		  struct ipu_image *in, struct ipu_image *out,
2392 		  enum ipu_rotate_mode rot_mode,
2393 		  ipu_image_convert_cb_t complete,
2394 		  void *complete_context)
2395 {
2396 	struct ipu_image_convert_ctx *ctx;
2397 	struct ipu_image_convert_run *run;
2398 	int ret;
2399 
2400 	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
2401 					complete, complete_context);
2402 	if (IS_ERR(ctx))
2403 		return ERR_CAST(ctx);
2404 
2405 	run = kzalloc(sizeof(*run), GFP_KERNEL);
2406 	if (!run) {
2407 		ipu_image_convert_unprepare(ctx);
2408 		return ERR_PTR(-ENOMEM);
2409 	}
2410 
2411 	run->ctx = ctx;
2412 	run->in_phys = in->phys0;
2413 	run->out_phys = out->phys0;
2414 
2415 	ret = ipu_image_convert_queue(run);
2416 	if (ret) {
2417 		ipu_image_convert_unprepare(ctx);
2418 		kfree(run);
2419 		return ERR_PTR(ret);
2420 	}
2421 
2422 	return run;
2423 }
2424 EXPORT_SYMBOL_GPL(ipu_image_convert);
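
/*
 * Minimal usage sketch for the canned conversion above, assuming a
 * hypothetical client; the callback signature follows
 * ipu_image_convert_cb_t from video/imx-ipu-image-convert.h and error
 * handling is trimmed:
 *
 *	static void my_convert_done(struct ipu_image_convert_run *run,
 *				    void *arg)
 *	{
 *		complete((struct completion *)arg);
 *	}
 *
 *	...
 *	run = ipu_image_convert(ipu, IC_TASK_POST_PROCESSOR, &in, &out,
 *				IPU_ROTATE_NONE, my_convert_done, &done);
 *	if (IS_ERR(run))
 *		return PTR_ERR(run);
 *	wait_for_completion(&done);
 *	ipu_image_convert_unprepare(run->ctx);
 *	kfree(run);
 */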
2425 
2426 int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
2427 {
2428 	struct ipu_image_convert_priv *priv;
2429 	int i;
2430 
2431 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2432 	if (!priv)
2433 		return -ENOMEM;
2434 
2435 	ipu->image_convert_priv = priv;
2436 	priv->ipu = ipu;
2437 
2438 	for (i = 0; i < IC_NUM_TASKS; i++) {
2439 		struct ipu_image_convert_chan *chan = &priv->chan[i];
2440 
2441 		chan->ic_task = i;
2442 		chan->priv = priv;
2443 		chan->dma_ch = &image_convert_dma_chan[i];
2444 		chan->in_eof_irq = -1;
2445 		chan->rot_in_eof_irq = -1;
2446 		chan->out_eof_irq = -1;
2447 		chan->rot_out_eof_irq = -1;
2448 
2449 		spin_lock_init(&chan->irqlock);
2450 		INIT_LIST_HEAD(&chan->ctx_list);
2451 		INIT_LIST_HEAD(&chan->pending_q);
2452 		INIT_LIST_HEAD(&chan->done_q);
2453 	}
2454 
2455 	return 0;
2456 }
2457 
2458 void ipu_image_convert_exit(struct ipu_soc *ipu)
2459 {
2460 }
2461