// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <media/drv-intf/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set these bits new */
	*clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
}

static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale &= ~(MASK_B0 | 0xf00);
	*hps_h_scale |= (hxo << 0);

	*hps_ctrl &= ~(MASK_W0 | MASK_B2);
	*hps_ctrl |= (hyo << 12);
}

/* helper functions for the calculation of the horizontal- and vertical
   scaling registers, clip-format-register etc ...
   these functions take pointers to the (most-likely read-out
   original-values) and manipulate them according to the requested
   changes.
*/

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0};

/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;

	/* helper variables */
	u32 h_atten = 0, i = 0;

	if ( 0 == out_x ) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						...
	*/
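	/*
	 * Illustrative example (not taken from the datasheet): in_x = 720
	 * and out_x = 240 gives xpsc = 720 / 240 = 3, i.e. the
	 * [1/3 .. 1/4) bucket above; any out_x >= in_x (zooming) falls
	 * through to xpsc = 1 and leaves the scaling to the fine scaler.
	 */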
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	}
	else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if ( 0 != flip_lr ) {

		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384 )
			xpsc++;
	}
	/* if zooming is wanted, number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while ( in_x / xpsc >= 768 )
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if ( xpsc > 64 )
		xpsc = 64;

	/* keep xacm clear */
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ( (in_x == out_x) && ( 1 == xpsc ) )
		xsci = 0x400;
	else
		xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if ( 0x400 == xsci )
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if( 1 == xpsc ) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}

		dcgx = i;
	}
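	/*
	 * Illustrative example (values only, not from the datasheet):
	 * for xpsc = 4 the table above yields
	 * cxy = hps_h_coeff_tab[3].hps_coeff = 0x06 with weight_sum = 8;
	 * the first h_attenuation[] entry >= 8 sits at index 3, so dcgx = 3.
	 */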
	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if ( xsci == 0x400)
		pfuv = 0x00;
	else if ( xsci < 0x600)
		pfuv = 0x01;
	else if ( xsci < 0x680)
		pfuv = 0x11;
	else if ( xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain &= MASK_W0|MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale |= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}

static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0};

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if ( in_y < out_y ) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */

	if (V4L2_FIELD_HAS_BOTH(field)) {
		if( 2*out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if( 4*out_y >= in_y ) {
			lpi = 1;
		}
		out_y *= 2;
	}
	if( 0 != lpi ) {

		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if ( in_y > out_y )
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);

	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						...
		*/
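		/*
		 * Illustrative example (interlaced capture, lpi == 0, values
		 * not taken from the datasheet): in_y = 576, out_y = 144
		 * gives ysci = (((10 * 1024 * 431) / 576) + 9) / 10 = 767;
		 * since ysci >= 512, yacl = 767 / (1024 - 767) = 2, which
		 * selects hps_v_coeff_tab[2] = {0x0300, 4} below.
		 */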
		if ( ysci < 512) {
			yacl = 0;
		} else {
			yacl = ( ysci / (1024 - ysci) );
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}

		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0|MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}

/* program the horizontal and vertical scalers for the requested capture window */
static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/
	calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl = 0;
	hps_h_prescale = 0; /* all bits get set in the function */
	hps_h_scale = 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE, hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN, hps_v_gain);
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE, hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE, hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) );
}

static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev,palette,&clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 );
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);
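/* Each of the three video DMA channels uses an identical block of six
 * registers (base_odd, base_even, prot_addr, pitch, base_page,
 * num_line_byte) spaced 0x18 bytes apart, which is why the offset below
 * is simply (which - 1) * 0x18; the shifted MC2 bits at the end request
 * the shadow-RAM upload for just that channel. (Descriptive note only;
 * the saa7146 datasheet remains the authoritative register map.)
 */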
void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if( which < 1 || which > 3) {
		return;
	}

	/* calculate starting address */
	where = (which-1)*0x18;

	saa7146_write(dev, where, vdma->base_odd);
	saa7146_write(dev, where+0x04, vdma->base_even);
	saa7146_write(dev, where+0x08, vdma->prot_addr);
	saa7146_write(dev, where+0x0c, vdma->pitch);
	saa7146_write(dev, where+0x10, vdma->base_page);
	saa7146_write(dev, where+0x14, vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1)));
	/*
	printk("vdma%d.base_even: 0x%08x\n", which,vdma->base_even);
	printk("vdma%d.base_odd: 0x%08x\n", which,vdma->base_odd);
	printk("vdma%d.prot_addr: 0x%08x\n", which,vdma->prot_addr);
	printk("vdma%d.base_page: 0x%08x\n", which,vdma->base_page);
	printk("vdma%d.pitch: 0x%08x\n", which,vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte);
	*/
}

static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	int bytesperline = buf->fmt->bytesperline;
	enum v4l2_field field = buf->fmt->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if( bytesperline != 0) {
		vdma1.pitch = bytesperline*2;
	} else {
		vdma1.pitch = (width*depth*2)/8;
	}
	vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = buf->pt[0].offset+(vdma1.pitch/2)*height;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr = buf->pt[0].offset+(vdma1.pitch/2)*height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if ( vv->last_field == V4L2_FIELD_TOP ) {
			vdma1.base_odd = vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
			vdma1.base_odd = vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}
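/* Illustrative numbers for calculate_video_dma_grab_packed() above,
 * assuming a 16 bpp packed format with bytesperline == 0, width == 720
 * and no vflip: pitch = (720 * 16 * 2) / 8 = 2880 (odd and even field
 * lines interleaved), base_even = offset, base_odd = offset + 1440 and
 * prot_addr = offset + 1440 * height; for single-field captures the
 * function then halves the pitch again. Example values only.
 */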
static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width;
	vdma3->pitch = width;

	/* fixme: look at bytesperline! */

	if( 0 != vv->vflip ) {
		vdma2->prot_addr = buf->pt[1].offset;
		vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr = buf->pt[2].offset;
		vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch/2);
		vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch/2);
		vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset;
	}

	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width/2;
	vdma3->pitch = width/2;

	if( 0 != vv->vflip ) {
		vdma2->prot_addr = buf->pt[2].offset;
		vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[2].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2);

		vdma3->prot_addr = buf->pt[1].offset;
		vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[1].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2);

	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset;
	}
	return 0;
}
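/* Note on the two helpers above: the chroma pitch is width for planar
 * 4:2:2 (depth 16) but width/2 for planar 4:2:0 (depth 12), and in the
 * non-flipped 4:2:0 case base_odd advances by a full pitch rather than
 * pitch/2, presumably because each chroma line spans two luma lines.
 * Both helpers still ignore bytesperline (see the fixme in
 * calc_planar_422()).
 */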
static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;

	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	enum v4l2_field field = buf->fmt->field;

	BUG_ON(0 == buf->pt[0].dma);
	BUG_ON(0 == buf->pt[1].dma);
	BUG_ON(0 == buf->pt[2].dma);

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here? The offsets are
	   most likely wrong, this version here only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary...*/

	vdma1.pitch = width*2;
	vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1;

	if( 0 != vv->vflip ) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = ((vdma1.pitch/2)*height)+buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2);
		vdma1.prot_addr = (vdma1.pitch/2)*height+buf->pt[0].offset;
	}

	vdma2.num_line_byte = 0; /* unused */
	vdma2.base_page = buf->pt[1].dma | ME1;

	vdma3.num_line_byte = 0; /* unused */
	vdma3.base_page = buf->pt[2].dma | ME1;

	switch( sfmt->depth ) {
	case 12: {
		calc_planar_420(vv,buf,&vdma2,&vdma3);
		break;
	}
	case 16: {
		calc_planar_422(vv,buf,&vdma2,&vdma3);
		break;
	}
	default: {
		return -1;
	}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if( 0 != vv->vflip ) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}
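/* Build the RPS0 command list for one capture: wait for the next field
 * boundary unless RPS register 0 is already set, set RPS register 0 via
 * MC2, switch the required video DMA engines on through MC1, wait for
 * the field(s) to be transferred, switch the DMAs off again, raise an
 * interrupt and stop. The seemingly unused 'count' below is consumed by
 * the WRITE_RPS0() macro, which uses it as the write index into the
 * RPS0 command area (descriptive summary only).
 */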
static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_06 | MASK_22); /* => mask */
	WRITE_RPS0(MASK_06 | MASK_22); /* => values */
	if( 0 != planar ) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21); /* => mask */
		WRITE_RPS0(MASK_05 | MASK_21); /* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20); /* => mask */
		WRITE_RPS0(MASK_04 | MASK_20); /* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_22 | MASK_06); /* => mask */
	WRITE_RPS0(MASK_22); /* => values */
	if( 0 != planar ) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21); /* => mask */
		WRITE_RPS0(MASK_21); /* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20); /* => mask */
		WRITE_RPS0(MASK_20); /* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}
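/* Entry point for every queued capture buffer: program the scaler window
 * and output format, toggle vv->last_field between top and bottom for
 * single-field captures, set up the packed or planar video DMAs, and
 * (re)arm the RPS0 program built by program_capture_engine() above
 * (descriptive summary only).
 */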
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if( 0 == vdma1_prot_addr ) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27 );
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);

	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if( 0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev,1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev,0);
	}

	/*
	printk("vdma%d.base_even: 0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1));
	printk("vdma%d.base_odd: 0x%08x\n", 1,saa7146_read(dev,BASE_ODD1));
	printk("vdma%d.prot_addr: 0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1));
	printk("vdma%d.base_page: 0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1));
	printk("vdma%d.pitch: 0x%08x\n", 1,saa7146_read(dev,PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1));
	printk("vdma%d => vptr : 0x%08x\n", 1,saa7146_read(dev,PCI_VDP1));
	*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}
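/*
 * Illustrative usage only (not part of this file): a bridge driver would
 * typically select the HPS input once during setup, e.g.
 *
 *	saa7146_set_hps_source_and_sync(dev, SAA7146_HPS_SOURCE_PORT_A,
 *					SAA7146_HPS_SYNC_PORT_A);
 *
 * and saa7146_set_capture(dev, buf, next) is then called for each buffer
 * that gets activated; the actual call sites live outside this file.
 */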