// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <media/drv-intf/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set the new, palette-dependent format bits */
	*clip_format |=  (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
}

static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale	&= ~(MASK_B0 | 0xf00);
	*hps_h_scale	|= (hxo <<  0);

	*hps_ctrl	&= ~(MASK_W0 | MASK_B2);
	*hps_ctrl	|= (hyo << 12);
}

/* helper functions for the calculation of the horizontal and vertical
   scaling registers, the clip-format register etc.
   These functions take pointers to the current register values (usually
   the values read back from the hardware) and modify them according to
   the requested changes. */

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
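/* the table below is indexed with the horizontal prescale factor (xpsc) minus one,
   clamped to the last entry */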
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00,   2}, {0x02,   4}, {0x00,   4}, {0x06,   8}, {0x02,   8},
	{0x08,   8}, {0x00,   8}, {0x1E,  16}, {0x0E,   8}, {0x26,   8},
	{0x06,   8}, {0x42,   8}, {0x02,   8}, {0x80,   8}, {0x00,   8},
	{0xFE,  16}, {0xFE,   8}, {0x7E,   8}, {0x7E,   8}, {0x3E,   8},
	{0x3E,   8}, {0x1E,   8}, {0x1E,   8}, {0x0E,   8}, {0x0E,   8},
	{0x06,   8}, {0x06,   8}, {0x02,   8}, {0x02,   8}, {0x00,   8},
	{0x00,   8}, {0xFE,  16}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8},
	{0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0xFE,   8}, {0x7E,   8},
	{0x7E,   8}, {0x3E,   8}, {0x3E,   8}, {0x1E,   8}, {0x1E,   8},
	{0x0E,   8}, {0x0E,   8}, {0x06,   8}, {0x06,   8}, {0x02,   8},
	{0x02,   8}, {0x00,   8}, {0x00,   8}, {0xFE,  16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0};

/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;

	/* helper variables */
	u32 h_atten = 0, i = 0;

	if ( 0 == out_x ) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						...		*/
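	/* e.g. in_x = 720, out_x = 180 gives xpsc = 4 (plain integer division) */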
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	}
	else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if ( 0 != flip_lr ) {

		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384 )
			xpsc++;
	}
	/* otherwise (no horizontal flip), the number of pixels
	   after horizontal prescaling must be < 768 */
	else {
		while ( in_x / xpsc >= 768 )
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if ( xpsc > 64 )
		xpsc = 64;

	/* keep xacm clear */
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[min(xpsc - 1, 63)].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ( (in_x == out_x) && ( 1 == xpsc ) )
		xsci = 0x400;
	else
		xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc;
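	/* e.g. in_x = 720, out_x = 360, xpsc = 2: xsci = (1024 * 720) / (360 * 2) + 2 = 1026 */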

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if ( 0x400 == xsci )
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if( 1 == xpsc ) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[min(xpsc - 1, 63)].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}

		dcgx = i;
	}
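	/* dcgx ends up as the index of the first h_attenuation[] entry that is >= the
	   coefficient table's weight_sum */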

	/* the horizontal scaling increment selects the UV filter
	   used to reduce the bandwidth and improve display quality,
	   so set it accordingly */
	if ( xsci == 0x400)
		pfuv = 0x00;
	else if ( xsci < 0x600)
		pfuv = 0x01;
	else if ( xsci < 0x680)
		pfuv = 0x11;
	else if ( xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain  &= MASK_W0|MASK_B2;
	*hps_v_gain  |= (pfuv << 24);

	*hps_h_scale	&= ~(MASK_W1 | 0xf000);
	*hps_h_scale	|= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale	|= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}

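/* hps_coeff used for CYA and CYB; indexed with the sequence length interval (yacl),
   clamped to the last entry */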
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
 {0x0100,   2},  {0x0102,   4},  {0x0300,   4},  {0x0106,   8},  {0x0502,   8},
 {0x0708,   8},  {0x0F00,   8},  {0x011E,  16},  {0x110E,  16},  {0x1926,  16},
 {0x3906,  16},  {0x3D42,  16},  {0x7D02,  16},  {0x7F80,  16},  {0xFF00,  16},
 {0x01FE,  32},  {0x01FE,  32},  {0x817E,  32},  {0x817E,  32},  {0xC13E,  32},
 {0xC13E,  32},  {0xE11E,  32},  {0xE11E,  32},  {0xF10E,  32},  {0xF10E,  32},
 {0xF906,  32},  {0xF906,  32},  {0xFD02,  32},  {0xFD02,  32},  {0xFF00,  32},
 {0xFF00,  32},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},
 {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x01FE,  64},  {0x817E,  64},
 {0x817E,  64},  {0xC13E,  64},  {0xC13E,  64},  {0xE11E,  64},  {0xE11E,  64},
 {0xF10E,  64},  {0xF10E,  64},  {0xF906,  64},  {0xF906,  64},  {0xFD02,  64},
 {0xFD02,  64},  {0xFF00,  64},  {0xFF00,  64},  {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0};

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if ( in_y < out_y ) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */

	if (V4L2_FIELD_HAS_BOTH(field)) {
		if( 2*out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if( 4*out_y >= in_y ) {
			lpi = 1;
		}
		out_y *= 2;
	}
	if( 0 != lpi ) {

		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if ( in_y > out_y )
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);
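		/* e.g. in_y = 576, out_y = 480 (both fields):
		   ysci = (1024 * 576) / 481 - 1024 = 202, ype = 12, ypo = 15 */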

	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if ( ysci < 512) {
			yacl = 0;
		} else {
			yacl = ( ysci / (1024 - ysci) );
		}
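		/* e.g. in_y = 576, out_y = 144 (both fields):
		   ysci = 767, ype = ypo = 48, yacl = 767 / 257 = 2 */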

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[min(yacl, 63)].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[min(yacl, 63)].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}

		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale	|= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1);

	*hps_v_gain	&= ~(MASK_W0|MASK_B2);
	*hps_v_gain	|= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}

/* calculate the scaling and offset registers for the given window and program the HPS */
static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain  = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain  = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/
	calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl	= 0;
	hps_h_prescale	= 0; /* all bits get set in the function */
	hps_h_scale	= 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE,	hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN,	hps_v_gain);
	saa7146_write(dev, HPS_CTRL,	hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE, hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE,	hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) );
}

static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev, palette, &clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 );
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);

void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if( which < 1 || which > 3) {
		return;
	}

	/* calculate starting address */
	where  = (which-1)*0x18;
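	/* each video DMA channel occupies a 0x18 byte block of registers
	   (base_odd, base_even, prot_addr, pitch, base_page, num_line_byte),
	   so channel 1 starts at 0x00, channel 2 at 0x18 and channel 3 at 0x30 */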

	saa7146_write(dev, where,	vdma->base_odd);
	saa7146_write(dev, where+0x04,	vdma->base_even);
	saa7146_write(dev, where+0x08,	vdma->prot_addr);
	saa7146_write(dev, where+0x0c,	vdma->pitch);
	saa7146_write(dev, where+0x10,	vdma->base_page);
	saa7146_write(dev, where+0x14,	vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1)));
/*
	printk("vdma%d.base_even:     0x%08x\n", which,vdma->base_even);
	printk("vdma%d.base_odd:      0x%08x\n", which,vdma->base_odd);
	printk("vdma%d.prot_addr:     0x%08x\n", which,vdma->prot_addr);
	printk("vdma%d.base_page:     0x%08x\n", which,vdma->base_page);
	printk("vdma%d.pitch:         0x%08x\n", which,vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte);
*/
}

static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_video_dma vdma1;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);

	int width = pix->width;
	int height = pix->height;
	int bytesperline = pix->bytesperline;
	enum v4l2_field field = pix->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if( bytesperline != 0) {
		vdma1.pitch = bytesperline*2;
	} else {
		vdma1.pitch = (width*depth*2)/8;
	}
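	/* pitch is the line-to-line distance of one field's DMA; with both fields
	   captured line-interleaved (see base_even/base_odd below) each field
	   advances by two image lines, hence twice bytesperline */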
	vdma1.num_line_byte	= ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page		= buf->pt[0].dma | ME1 | sfmt->swap;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr	= buf->pt[0].offset;
		vdma1.base_even	= buf->pt[0].offset+(vdma1.pitch/2)*height;
		vdma1.base_odd	= vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even	= buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr	= buf->pt[0].offset+(vdma1.pitch/2)*height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if ( vv->last_field == V4L2_FIELD_TOP ) {
			vdma1.base_odd	= vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
			vdma1.base_odd	= vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd	= vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}
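	/* a negative pitch, together with the start addresses set up above for
	   vflip, makes the DMA fill the buffer bottom-up, i.e. vertically flipped */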

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}

static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	struct v4l2_pix_format *pix = &vv->video_fmt;
	int height = pix->height;
	int width = pix->width;

	vdma2->pitch	= width;
	vdma3->pitch	= width;

	/* fixme: look at bytesperline! */

	if( 0 != vv->vflip ) {
		vdma2->prot_addr	= buf->pt[1].offset;
		vdma2->base_even	= ((vdma2->pitch/2)*height)+buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr	= buf->pt[2].offset;
		vdma3->base_even	= ((vdma3->pitch/2)*height)+buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even - (vdma3->pitch/2);
	} else {
		vdma3->base_even	= buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even + (vdma3->pitch/2);
		vdma3->prot_addr	= (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even	= buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even + (vdma2->pitch/2);
		vdma2->prot_addr	= (vdma2->pitch/2)*height+buf->pt[1].offset;
	}

	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	struct v4l2_pix_format *pix = &vv->video_fmt;
	int height = pix->height;
	int width = pix->width;

	vdma2->pitch	= width/2;
	vdma3->pitch	= width/2;

	if( 0 != vv->vflip ) {
		vdma2->prot_addr	= buf->pt[2].offset;
		vdma2->base_even	= ((vdma2->pitch/2)*height)+buf->pt[2].offset;
		vdma2->base_odd		= vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr	= buf->pt[1].offset;
		vdma3->base_even	= ((vdma3->pitch/2)*height)+buf->pt[1].offset;
		vdma3->base_odd		= vdma3->base_even - (vdma3->pitch/2);

	} else {
		vdma3->base_even	= buf->pt[2].offset;
		vdma3->base_odd		= vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr	= (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even	= buf->pt[1].offset;
		vdma2->base_odd		= vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr	= (vdma2->pitch/2)*height+buf->pt[1].offset;
	}
	return 0;
}

static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);

	int width = pix->width;
	int height = pix->height;
	enum v4l2_field field = pix->field;

	if (WARN_ON(!buf->pt[0].dma) ||
	    WARN_ON(!buf->pt[1].dma) ||
	    WARN_ON(!buf->pt[2].dma))
		return -1;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here? The offsets are
	   most likely wrong; this version only works for page-aligned
	   buffers, modifications to the pagetable functions are necessary... */

	vdma1.pitch		= width*2;
	vdma1.num_line_byte	= ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page		= buf->pt[0].dma | ME1;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr	= buf->pt[0].offset;
		vdma1.base_even	= ((vdma1.pitch/2)*height)+buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even	= buf->pt[0].offset;
		vdma1.base_odd	= vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr	= (vdma1.pitch/2)*height+buf->pt[0].offset;
	}

	vdma2.num_line_byte	= 0; /* unused */
	vdma2.base_page		= buf->pt[1].dma | ME1;

	vdma3.num_line_byte	= 0; /* unused */
	vdma3.base_page		= buf->pt[2].dma | ME1;

	switch( sfmt->depth ) {
		case 12: {
			calc_planar_420(vv,buf,&vdma2,&vdma3);
			break;
		}
		case 16: {
			calc_planar_422(vv,buf,&vdma2,&vdma3);
			break;
		}
		default: {
			return -1;
		}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd	= vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd	= vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd	= vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd	= vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}

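/* build the RPS0 program for one capture: wait for the field toggle, flag the
   start of streaming (rps register 0 via MC2), switch the video DMA(s) on, wait
   for the wanted field(s) to complete, switch the DMA(s) off again, raise an
   interrupt and stop */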
static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_06 | MASK_22);			/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);			/* => values */
	if( 0 != planar ) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);			/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);			/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);			/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);			/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_22 | MASK_06);			/* => mask */
	WRITE_RPS0(MASK_22);				/* => values */
	if( 0 != planar ) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);			/* => mask */
		WRITE_RPS0(MASK_21);				/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);			/* => mask */
		WRITE_RPS0(MASK_20);				/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}

/* disable clipping */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 clip_format	= saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* mask out relevant bits (=lower word) */
	clip_format &= MASK_W1;

	/* upload clipping-registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}

void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if( 0 == vdma1_prot_addr ) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27 );
	}

	saa7146_set_window(dev, pix->width, pix->height, pix->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if( 0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev,1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev,0);
	}

/*
	printk("vdma%d.base_even:     0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1));
	printk("vdma%d.base_odd:      0x%08x\n", 1,saa7146_read(dev,BASE_ODD1));
	printk("vdma%d.prot_addr:     0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1));
	printk("vdma%d.base_page:     0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1));
	printk("vdma%d.pitch:         0x%08x\n", 1,saa7146_read(dev,PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1));
	printk("vdma%d => vptr      : 0x%08x\n", 1,saa7146_read(dev,PCI_VDP1));
*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}