// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Renesas RZ/G2L CRU
 *
 * Copyright (C) 2022 Renesas Electronics Corp.
 *
 * Based on Renesas R-Car VIN
 * Copyright (C) 2016 Renesas Electronics Corp.
 * Copyright (C) 2011-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
 * Copyright (C) 2008 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <media/mipi-csi2.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>

#include "rzg2l-cru.h"
#include "rzg2l-cru-regs.h"

#define RZG2L_TIMEOUT_MS		100
#define RZG2L_RETRIES			10

#define RZG2L_CRU_DEFAULT_FORMAT	V4L2_PIX_FMT_UYVY
#define RZG2L_CRU_DEFAULT_WIDTH		RZG2L_CRU_MIN_INPUT_WIDTH
#define RZG2L_CRU_DEFAULT_HEIGHT	RZG2L_CRU_MIN_INPUT_HEIGHT
#define RZG2L_CRU_DEFAULT_FIELD		V4L2_FIELD_NONE
#define RZG2L_CRU_DEFAULT_COLORSPACE	V4L2_COLORSPACE_SRGB

#define RZG2L_CRU_STRIDE_MAX		32640
#define RZG2L_CRU_STRIDE_ALIGN		128

struct rzg2l_cru_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

#define to_buf_list(vb2_buffer) \
	(&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list)

/* -----------------------------------------------------------------------------
 * DMA operations
 */
static void __rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
	const u16 *regs = cru->info->regs;

	/*
	 * CRUnCTRL is the first register on all CRU-supported SoCs, so
	 * validate that the remaining registers have a valid offset set in
	 * cru->info->regs.
	 */
	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
	    WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
		return;

	iowrite32(value, cru->base + regs[offset]);
}

static u32 __rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
{
	const u16 *regs = cru->info->regs;

	/*
	 * CRUnCTRL is the first register on all CRU-supported SoCs, so
	 * validate that the remaining registers have a valid offset set in
	 * cru->info->regs.
	 */
	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
	    WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
		return 0;

	return ioread32(cru->base + regs[offset]);
}

static __always_inline void
__rzg2l_cru_write_constant(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
	const u16 *regs = cru->info->regs;

	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);

	iowrite32(value, cru->base + regs[offset]);
}

static __always_inline u32
__rzg2l_cru_read_constant(struct rzg2l_cru_dev *cru, u32 offset)
{
	const u16 *regs = cru->info->regs;

	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);

	return ioread32(cru->base + regs[offset]);
}

#define rzg2l_cru_write(cru, offset, value) \
	(__builtin_constant_p(offset) ? \
	 __rzg2l_cru_write_constant(cru, offset, value) : \
	 __rzg2l_cru_write(cru, offset, value))

#define rzg2l_cru_read(cru, offset) \
	(__builtin_constant_p(offset) ? \
	 __rzg2l_cru_read_constant(cru, offset) : \
	 __rzg2l_cru_read(cru, offset))
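
/*
 * rzg2l_cru_write()/rzg2l_cru_read() dispatch on whether the register offset
 * is a compile-time constant: constant offsets (e.g. CRUnCTRL) are range
 * checked with BUILD_BUG_ON() at build time, while computed offsets such as
 * AMnMBxADDRL(slot) go through the runtime WARN_ON() checks above.
 */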

/* Return all queued buffers to vb2 in the given state; takes cru->qlock internally. */
static void return_unused_buffers(struct rzg2l_cru_dev *cru,
				  enum vb2_buffer_state state)
{
	struct rzg2l_cru_buffer *buf, *node;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cru->qlock, flags);
	for (i = 0; i < cru->num_buf; i++) {
		if (cru->queue_buf[i]) {
			vb2_buffer_done(&cru->queue_buf[i]->vb2_buf,
					state);
			cru->queue_buf[i] = NULL;
		}
	}

	list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
		vb2_buffer_done(&buf->vb.vb2_buf, state);
		list_del(&buf->list);
	}
	spin_unlock_irqrestore(&cru->qlock, flags);
}

static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
				 unsigned int *nplanes, unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < cru->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = cru->format.sizeimage;

	return 0;
}

static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb)
{
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = cru->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(cru->dev, "buffer too small (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	return 0;
}

static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long flags;

	spin_lock_irqsave(&cru->qlock, flags);

	list_add_tail(to_buf_list(vbuf), &cru->buf_list);

	spin_unlock_irqrestore(&cru->qlock, flags);
}

static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
				    int slot, dma_addr_t addr)
{
	/*
	 * The address needs to be 512-byte aligned. The driver should never
	 * accept settings that do not satisfy this in the first place...
	 */
	if (WARN_ON((addr) & RZG2L_CRU_HW_BUFFER_MASK))
		return;

	/* Currently only the lower 32 bits of the buffer address are used */
	rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
	rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);

	cru->buf_addr[slot] = addr;
}

/*
 * Moves a buffer from the queue to the HW slot. If no buffer is
 * available use the scratch buffer. The scratch buffer is never
 * returned to userspace, its only function is to enable the capture
 * loop to keep running.
 */
static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
{
	struct vb2_v4l2_buffer *vbuf;
	struct rzg2l_cru_buffer *buf;
	dma_addr_t phys_addr;

	/* An already populated slot shall never be overwritten. */
	if (WARN_ON(cru->queue_buf[slot]))
		return;

	dev_dbg(cru->dev, "Filling HW slot: %d\n", slot);

	if (list_empty(&cru->buf_list)) {
		cru->queue_buf[slot] = NULL;
		phys_addr = cru->scratch_phys;
	} else {
		/* Keep track of buffer we give to HW */
		buf = list_entry(cru->buf_list.next,
				 struct rzg2l_cru_buffer, list);
		vbuf = &buf->vb;
		list_del_init(to_buf_list(vbuf));
		cru->queue_buf[slot] = vbuf;

		/* Setup DMA */
		phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
	}

	rzg2l_cru_set_slot_addr(cru, slot, phys_addr);
}

static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
{
	const struct rzg2l_cru_info *info = cru->info;
	unsigned int slot;
	u32 amnaxiattr;

	/*
	 * Set image data memory banks.
	 * Currently, we will use maximum address.
	 */
	rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1));

	for (slot = 0; slot < cru->num_buf; slot++)
		rzg2l_cru_fill_hw_slot(cru, slot);

	if (info->has_stride) {
		u32 stride = cru->format.bytesperline;
		u32 amnis;

		stride /= RZG2L_CRU_STRIDE_ALIGN;
		amnis = rzg2l_cru_read(cru, AMnIS) & ~AMnIS_IS_MASK;
		rzg2l_cru_write(cru, AMnIS, amnis | AMnIS_IS(stride));
	}

	/* Set AXI burst max length to recommended setting */
	amnaxiattr = rzg2l_cru_read(cru, AMnAXIATTR) & ~AMnAXIATTR_AXILEN_MASK;
	amnaxiattr |= AMnAXIATTR_AXILEN;
	rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}

static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
				 const struct rzg2l_cru_ip_format *ip_fmt,
				 u8 csi_vc)
{
	const struct rzg2l_cru_info *info = cru->info;
	u32 icnmc = ICnMC_INF(ip_fmt->datatype);

	if (cru->info->regs[ICnSVC]) {
		rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
		rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
				ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
	}

	icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;

	/* Set virtual channel CSI2 */
	icnmc |= ICnMC_VCSEL(csi_vc);

	rzg2l_cru_write(cru, info->image_conv, icnmc);
}

static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
					   struct v4l2_mbus_framefmt *ip_sd_fmt,
					   u8 csi_vc)
{
	const struct rzg2l_cru_info *info = cru->info;
	const struct rzg2l_cru_ip_format *cru_video_fmt;
	const struct rzg2l_cru_ip_format *cru_ip_fmt;

	cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
	rzg2l_cru_csi2_setup(cru, cru_ip_fmt, csi_vc);

	/* Output format */
	cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
	if (!cru_video_fmt) {
		dev_err(cru->dev, "Invalid pixelformat (0x%x)\n",
			cru->format.pixelformat);
		return -EINVAL;
	}

	/* If input and output use same colorspace, do bypass mode */
	if (cru_ip_fmt->yuv == cru_video_fmt->yuv)
		rzg2l_cru_write(cru, info->image_conv,
				rzg2l_cru_read(cru, info->image_conv) | ICnMC_CSCTHR);
	else
		rzg2l_cru_write(cru, info->image_conv,
				rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_CSCTHR);

	/* Set output data format */
	rzg2l_cru_write(cru, ICnDMR, cru_video_fmt->icndmr);

	return 0;
}
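
/*
 * The FIFO is treated as drained only when the read pointer matches the
 * write pointer for both the _B0 and _B1 pointer pairs in AMnFIFOPNTR.
 */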
bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru)
{
	u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);

	if ((((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B1) >> 24) ==
	     ((amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B1) >> 8)) &&
	    (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B0) >> 16) ==
	     (amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B0)))
		return true;

	return false;
}

bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
{
	u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;

	amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);

	amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
	amnfifopntr_r_y =
		(amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;

	return amnfifopntr_w == amnfifopntr_r_y;
}

void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
{
	unsigned int retries = 0;
	unsigned long flags;
	u32 icnms;

	spin_lock_irqsave(&cru->qlock, flags);

	/* Disable and clear the interrupt */
	cru->info->disable_interrupts(cru);

	/* Stop the operation of image conversion */
	rzg2l_cru_write(cru, ICnEN, 0);

	/* Wait for streaming to stop */
	while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) {
		spin_unlock_irqrestore(&cru->qlock, flags);
		msleep(RZG2L_TIMEOUT_MS);
		spin_lock_irqsave(&cru->qlock, flags);
	}

	icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA;
	if (icnms)
		dev_err(cru->dev, "Failed to stop HW, something is seriously broken\n");

	cru->state = RZG2L_CRU_DMA_STOPPED;

	/* Wait until the FIFO becomes empty */
	for (retries = 5; retries > 0; retries--) {
		if (cru->info->fifo_empty(cru))
			break;

		usleep_range(10, 20);
	}

	/* Report if the FIFO could not be emptied */
	if (!retries)
		dev_err(cru->dev, "Failed to empty FIFO\n");

	/* Stop the AXI bus */
	rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP);

	/* Wait until the AXI bus stops */
	for (retries = 5; retries > 0; retries--) {
		if (rzg2l_cru_read(cru, AMnAXISTPACK) &
		    AMnAXISTPACK_AXI_STOP_ACK)
			break;

		usleep_range(10, 20);
	}

	/* Report if the AXI bus could not be stopped */
	if (!retries)
		dev_err(cru->dev, "Failed to stop AXI bus\n");

	/* Cancel the AXI bus stop request */
	rzg2l_cru_write(cru, AMnAXISTP, 0);

	/* Reset the CRU (AXI-master) */
	reset_control_assert(cru->aresetn);

	/* Reset the image processing module */
	rzg2l_cru_write(cru, CRUnRST, 0);

	spin_unlock_irqrestore(&cru->qlock, flags);
}
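
/*
 * Query the remote subdev connected to the IP sink pad for its CSI-2 frame
 * descriptor and use the virtual channel of the first entry. Remotes that do
 * not implement .get_frame_desc() fall back to virtual channel 0.
 */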
static int rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev *cru)
{
	struct v4l2_mbus_frame_desc fd = { };
	struct media_pad *remote_pad;
	int ret;

	remote_pad = media_pad_remote_pad_unique(&cru->ip.pads[RZG2L_CRU_IP_SINK]);
	ret = v4l2_subdev_call(cru->ip.remote, pad, get_frame_desc, remote_pad->index, &fd);
	if (ret < 0 && ret != -ENOIOCTLCMD) {
		dev_err(cru->dev, "get_frame_desc failed on IP remote subdev\n");
		return ret;
	}
	/* If the remote subdev does not implement .get_frame_desc, default to VC0. */
	if (ret == -ENOIOCTLCMD)
		return 0;

	if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
		dev_err(cru->dev, "get_frame_desc returned invalid bus type %d\n", fd.type);
		return -EINVAL;
	}

	if (!fd.num_entries) {
		dev_err(cru->dev, "get_frame_desc returned zero entries\n");
		return -EINVAL;
	}

	return fd.entry[0].bus.csi2.vc;
}

void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FSxE(cru->svc_channel));
	rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FExE(cru->svc_channel));
}

void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, 0);
	rzg2l_cru_write(cru, CRUnIE2, 0);
	rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
	rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
}

void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
}

void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, 0);
	rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
}

int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
{
	struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
	unsigned long flags;
	u8 csi_vc;
	int ret;

	ret = rzg2l_cru_get_virtual_channel(cru);
	if (ret < 0)
		return ret;
	csi_vc = ret;
	cru->svc_channel = csi_vc;

	spin_lock_irqsave(&cru->qlock, flags);

	/* Select a video input */
	rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));

	/* Cancel the software reset for the image processing block */
	rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);

	/* Disable and clear the interrupt before using */
	cru->info->disable_interrupts(cru);

	/* Initialize the AXI master */
	rzg2l_cru_initialize_axi(cru);

	/* Initialize image convert */
	ret = rzg2l_cru_initialize_image_conv(cru, fmt, csi_vc);
	if (ret) {
		spin_unlock_irqrestore(&cru->qlock, flags);
		return ret;
	}

	/* Enable interrupt */
	cru->info->enable_interrupts(cru);

	/* Enable image processing reception */
	rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);

	spin_unlock_irqrestore(&cru->qlock, flags);

	return 0;
}

static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
{
	struct media_pipeline *pipe;
	struct v4l2_subdev *sd;
	struct media_pad *pad;
	int ret;

	pad = media_pad_remote_pad_first(&cru->pad);
	if (!pad)
		return -EPIPE;

	sd = media_entity_to_v4l2_subdev(pad->entity);

	if (!on) {
		int stream_off_ret = 0;

		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			stream_off_ret = ret;

		ret = v4l2_subdev_call(sd, video, post_streamoff);
		if (ret == -ENOIOCTLCMD)
			ret = 0;
		if (ret && !stream_off_ret)
			stream_off_ret = ret;

		video_device_pipeline_stop(&cru->vdev);

		return stream_off_ret;
	}

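	/*
	 * Reuse the pipeline the source subdev already belongs to, if any;
	 * otherwise start a new pipeline owned by the video device.
	 */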
	pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
	ret = video_device_pipeline_start(&cru->vdev, pipe);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
	if (ret && ret != -ENOIOCTLCMD)
		goto pipe_line_stop;

	ret = v4l2_subdev_call(sd, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto err_s_stream;

	return 0;

err_s_stream:
	v4l2_subdev_call(sd, video, post_streamoff);

pipe_line_stop:
	video_device_pipeline_stop(&cru->vdev);

	return ret;
}

static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru)
{
	cru->state = RZG2L_CRU_DMA_STOPPING;

	rzg2l_cru_set_stream(cru, 0);
}

irqreturn_t rzg2l_cru_irq(int irq, void *data)
{
	struct rzg2l_cru_dev *cru = data;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_status;
	u32 amnmbs;
	int slot;

	spin_lock_irqsave(&cru->qlock, flags);

	irq_status = rzg2l_cru_read(cru, CRUnINTS);
	if (!irq_status)
		goto done;

	handled = 1;

	rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));

	/* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
	if (cru->state == RZG2L_CRU_DMA_STOPPED) {
		dev_dbg(cru->dev, "IRQ while state stopped\n");
		goto done;
	}

	/* Nothing else to do if capture status is 'RZG2L_CRU_DMA_STOPPING' */
	if (cru->state == RZG2L_CRU_DMA_STOPPING) {
		if (irq_status & CRUnINTS_SFS)
			dev_dbg(cru->dev, "IRQ while state stopping\n");
		goto done;
	}

	/* Prepare for capture and update state */
	amnmbs = rzg2l_cru_read(cru, AMnMBS);
	slot = amnmbs & AMnMBS_MBSTS;

	/*
	 * AMnMBS.MBSTS indicates the destination Memory Bank (MB).
	 * Recalculate to get the MB whose transfer has just completed.
	 */
	if (slot == 0)
		slot = cru->num_buf - 1;
	else
		slot--;

	/*
	 * To hand buffers back in a known order to userspace start
	 * to capture first from slot 0.
	 */
	if (cru->state == RZG2L_CRU_DMA_STARTING) {
		if (slot != 0) {
			dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
			goto done;
		}

		dev_dbg(cru->dev, "Capture start synced!\n");
		cru->state = RZG2L_CRU_DMA_RUNNING;
	}

	/* Capture frame */
	if (cru->queue_buf[slot]) {
		cru->queue_buf[slot]->field = cru->format.field;
		cru->queue_buf[slot]->sequence = cru->sequence;
		cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
		vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf,
				VB2_BUF_STATE_DONE);
		cru->queue_buf[slot] = NULL;
	} else {
		/* Scratch buffer was used, dropping frame. */
		dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
	}

	cru->sequence++;

	/* Prepare for next frame */
	rzg2l_cru_fill_hw_slot(cru, slot);

done:
	spin_unlock_irqrestore(&cru->qlock, flags);

	return IRQ_RETVAL(handled);
}
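
/*
 * Work out which HW slot has just completed by reading back the latched
 * transfer address (AMnMADRSL/AMnMADRSH) and finding the buffer whose
 * address range contains it.
 */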
static int rzg3e_cru_get_current_slot(struct rzg2l_cru_dev *cru)
{
	u64 amnmadrs;
	int slot;

	/*
	 * When AMnMADRSL is read, AMnMADRSH of the higher-order
	 * address also latches the address.
	 *
	 * AMnMADRSH must be read after AMnMADRSL has been read.
	 */
	amnmadrs = rzg2l_cru_read(cru, AMnMADRSL);
	amnmadrs |= (u64)rzg2l_cru_read(cru, AMnMADRSH) << 32;

	/* Ensure amnmadrs is within this buffer range */
	for (slot = 0; slot < cru->num_buf; slot++) {
		if (amnmadrs >= cru->buf_addr[slot] &&
		    amnmadrs < cru->buf_addr[slot] + cru->format.sizeimage)
			return slot;
	}

	dev_err(cru->dev, "Invalid MB address 0x%llx (out of range)\n", amnmadrs);
	return -EINVAL;
}

irqreturn_t rzg3e_cru_irq(int irq, void *data)
{
	struct rzg2l_cru_dev *cru = data;
	u32 irq_status;
	int slot;

	scoped_guard(spinlock, &cru->qlock) {
		irq_status = rzg2l_cru_read(cru, CRUnINTS2);
		if (!irq_status)
			return IRQ_NONE;

		dev_dbg(cru->dev, "CRUnINTS2 0x%x\n", irq_status);

		rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));

		/* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
		if (cru->state == RZG2L_CRU_DMA_STOPPED) {
			dev_dbg(cru->dev, "IRQ while state stopped\n");
			return IRQ_HANDLED;
		}

		if (cru->state == RZG2L_CRU_DMA_STOPPING) {
			if (irq_status & CRUnINTS2_FSxS(0) ||
			    irq_status & CRUnINTS2_FSxS(1) ||
			    irq_status & CRUnINTS2_FSxS(2) ||
			    irq_status & CRUnINTS2_FSxS(3))
				dev_dbg(cru->dev, "IRQ while state stopping\n");
			return IRQ_HANDLED;
		}

		slot = rzg3e_cru_get_current_slot(cru);
		if (slot < 0)
			return IRQ_HANDLED;

		dev_dbg(cru->dev, "Current written slot: %d\n", slot);
		cru->buf_addr[slot] = 0;

		/*
		 * To hand buffers back in a known order to userspace start
		 * to capture first from slot 0.
		 */
		if (cru->state == RZG2L_CRU_DMA_STARTING) {
			if (slot != 0) {
				dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
				return IRQ_HANDLED;
			}
			dev_dbg(cru->dev, "Capture start synced!\n");
			cru->state = RZG2L_CRU_DMA_RUNNING;
		}

		/* Capture frame */
		if (cru->queue_buf[slot]) {
			struct vb2_v4l2_buffer *buf = cru->queue_buf[slot];

			buf->field = cru->format.field;
			buf->sequence = cru->sequence;
			buf->vb2_buf.timestamp = ktime_get_ns();
			vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
			cru->queue_buf[slot] = NULL;
		} else {
			/* Scratch buffer was used, dropping frame. */
			dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
		}

		cru->sequence++;

		/* Prepare for next frame */
		rzg2l_cru_fill_hw_slot(cru, slot);
	}

	return IRQ_HANDLED;
}

static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
{
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
	int ret;

	ret = pm_runtime_resume_and_get(cru->dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(cru->vclk);
	if (ret)
		goto err_pm_put;

	/* Release reset state */
	ret = reset_control_deassert(cru->aresetn);
	if (ret) {
		dev_err(cru->dev, "failed to deassert aresetn\n");
		goto err_vclk_disable;
	}

	ret = reset_control_deassert(cru->presetn);
	if (ret) {
		reset_control_assert(cru->aresetn);
		dev_err(cru->dev, "failed to deassert presetn\n");
		goto assert_aresetn;
	}

	/* Allocate scratch buffer */
	cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
					  &cru->scratch_phys, GFP_KERNEL);
	if (!cru->scratch) {
		return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
		dev_err(cru->dev, "Failed to allocate scratch buffer\n");
		ret = -ENOMEM;
		goto assert_presetn;
	}

	cru->sequence = 0;

	ret = rzg2l_cru_set_stream(cru, 1);
	if (ret) {
		return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
		goto out;
	}

	cru->state = RZG2L_CRU_DMA_STARTING;
	dev_dbg(cru->dev, "Starting to capture\n");
	return 0;

out:
	if (ret)
		dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch,
				  cru->scratch_phys);
assert_presetn:
	reset_control_assert(cru->presetn);

assert_aresetn:
	reset_control_assert(cru->aresetn);

err_vclk_disable:
	clk_disable_unprepare(cru->vclk);

err_pm_put:
	pm_runtime_put_sync(cru->dev);

	return ret;
}

static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
{
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);

	rzg2l_cru_stop_streaming(cru);

	/* Free scratch buffer */
	dma_free_coherent(cru->dev, cru->format.sizeimage,
			  cru->scratch, cru->scratch_phys);

	return_unused_buffers(cru, VB2_BUF_STATE_ERROR);

	reset_control_assert(cru->presetn);
	clk_disable_unprepare(cru->vclk);
	pm_runtime_put_sync(cru->dev);
}

static const struct vb2_ops rzg2l_cru_qops = {
	.queue_setup = rzg2l_cru_queue_setup,
	.buf_prepare = rzg2l_cru_buffer_prepare,
	.buf_queue = rzg2l_cru_buffer_queue,
	.start_streaming = rzg2l_cru_start_streaming_vq,
	.stop_streaming = rzg2l_cru_stop_streaming_vq,
};

void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru)
{
	mutex_destroy(&cru->lock);

	v4l2_device_unregister(&cru->v4l2_dev);
	vb2_queue_release(&cru->queue);
}

int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
{
	struct vb2_queue *q = &cru->queue;
	unsigned int i;
	int ret;

	/* Initialize the top-level structure */
	ret = v4l2_device_register(cru->dev, &cru->v4l2_dev);
	if (ret)
		return ret;

	mutex_init(&cru->lock);
	INIT_LIST_HEAD(&cru->buf_list);

	spin_lock_init(&cru->qlock);

	cru->state = RZG2L_CRU_DMA_STOPPED;

	for (i = 0; i < RZG2L_CRU_HW_BUFFER_MAX; i++)
		cru->queue_buf[i] = NULL;

	/* buffer queue */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->lock = &cru->lock;
	q->drv_priv = cru;
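	/*
	 * Each vb2 buffer embeds a struct rzg2l_cru_buffer so that it can be
	 * linked into cru->buf_list via to_buf_list().
	 */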
	q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
	q->ops = &rzg2l_cru_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_queued_buffers = 4;
	q->dev = cru->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(cru->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	return 0;

error:
	mutex_destroy(&cru->lock);
	v4l2_device_unregister(&cru->v4l2_dev);
	return ret;
}

/* -----------------------------------------------------------------------------
 * V4L2 stuff
 */

static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
				   struct v4l2_pix_format *pix)
{
	const struct rzg2l_cru_info *info = cru->info;
	const struct rzg2l_cru_ip_format *fmt;

	fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
	if (!fmt) {
		pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
		fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
	}

	switch (pix->field) {
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_NONE:
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_INTERLACED:
		break;
	default:
		pix->field = RZG2L_CRU_DEFAULT_FIELD;
		break;
	}

	/* Limit to CRU capabilities */
	v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
			      &pix->height, 240, info->max_height, 2, 0);

	v4l2_fill_pixfmt(pix, pix->pixelformat, pix->width, pix->height);

	dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
		pix->width, pix->height, pix->bytesperline, pix->sizeimage);
}

static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru,
				 struct v4l2_pix_format *pix)
{
	/*
	 * The V4L2 specification clearly documents the colorspace fields
	 * as being set by drivers for capture devices. Using the values
	 * supplied by userspace thus wouldn't comply with the API. Until
	 * the API is updated force fixed values.
	 */
	pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
							   pix->ycbcr_enc);

	rzg2l_cru_format_align(cru, pix);
}

static int rzg2l_cru_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card));

	return 0;
}

static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	rzg2l_cru_try_format(cru, &f->fmt.pix);

	return 0;
}

static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	if (vb2_is_busy(&cru->queue))
		return -EBUSY;

	rzg2l_cru_try_format(cru, &f->fmt.pix);

	cru->format = f->fmt.pix;

	return 0;
}

static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	f->fmt.pix = cru->format;

	return 0;
}

static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	const struct rzg2l_cru_ip_format *fmt;

	fmt = rzg2l_cru_ip_index_to_fmt(f->index);
	if (!fmt)
		return -EINVAL;

	f->pixelformat = fmt->format;

	return 0;
}

static int rzg2l_cru_enum_framesizes(struct file *file, void *fh,
				     struct v4l2_frmsizeenum *fsize)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);
	const struct rzg2l_cru_info *info = cru->info;
	const struct rzg2l_cru_ip_format *fmt;

	if (fsize->index)
		return -EINVAL;

	fmt = rzg2l_cru_ip_format_to_fmt(fsize->pixel_format);
	if (!fmt)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	fsize->stepwise.min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
	fsize->stepwise.max_width = info->max_width;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
	fsize->stepwise.max_height = info->max_height;
	fsize->stepwise.step_height = 1;

	return 0;
}

static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
	.vidioc_querycap = rzg2l_cru_querycap,
	.vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = rzg2l_cru_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = rzg2l_cru_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = rzg2l_cru_enum_fmt_vid_cap,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_framesizes = rzg2l_cru_enum_framesizes,
};

/* -----------------------------------------------------------------------------
 * Media controller file operations
 */

static int rzg2l_cru_open(struct file *file)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);
	int ret;

	ret = mutex_lock_interruptible(&cru->lock);
	if (ret)
		return ret;

	ret = v4l2_fh_open(file);
	if (ret)
		goto err_unlock;

	mutex_unlock(&cru->lock);

	return 0;

err_unlock:
	mutex_unlock(&cru->lock);

	return ret;
}

static int rzg2l_cru_release(struct file *file)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);
	int ret;

	mutex_lock(&cru->lock);

	/* the release helper will cleanup any on-going streaming. */
	ret = _vb2_fop_release(file, NULL);

	mutex_unlock(&cru->lock);

	return ret;
}

static const struct v4l2_file_operations rzg2l_cru_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = rzg2l_cru_open,
	.release = rzg2l_cru_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
	.read = vb2_fop_read,
};

/* -----------------------------------------------------------------------------
 * Media entity operations
 */

static int rzg2l_cru_video_link_validate(struct media_link *link)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct rzg2l_cru_ip_format *video_fmt;
	struct v4l2_subdev *subdev;
	struct rzg2l_cru_dev *cru;
	int ret;

	subdev = media_entity_to_v4l2_subdev(link->source->entity);
	fmt.pad = link->source->index;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	cru = container_of(media_entity_to_video_device(link->sink->entity),
			   struct rzg2l_cru_dev, vdev);
	video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);

	if (fmt.format.width != cru->format.width ||
	    fmt.format.height != cru->format.height ||
	    fmt.format.field != cru->format.field ||
	    !rzg2l_cru_ip_fmt_supports_mbus_code(video_fmt, fmt.format.code))
		return -EPIPE;

	return 0;
}

static const struct media_entity_operations rzg2l_cru_video_media_ops = {
	.link_validate = rzg2l_cru_video_link_validate,
};

static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru)
{
	struct video_device *vdev = &cru->vdev;

	vdev->v4l2_dev = &cru->v4l2_dev;
	vdev->queue = &cru->queue;
	snprintf(vdev->name, sizeof(vdev->name), "CRU output");
	vdev->release = video_device_release_empty;
	vdev->lock = &cru->lock;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	vdev->device_caps |= V4L2_CAP_IO_MC;
	vdev->entity.ops = &rzg2l_cru_video_media_ops;
	vdev->fops = &rzg2l_cru_fops;
	vdev->ioctl_ops = &rzg2l_cru_ioctl_ops;

	/* Set a default format */
	cru->format.pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
	cru->format.width = RZG2L_CRU_DEFAULT_WIDTH;
	cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT;
	cru->format.field = RZG2L_CRU_DEFAULT_FIELD;
	cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
	rzg2l_cru_format_align(cru, &cru->format);
}

void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru)
{
	media_device_unregister(&cru->mdev);
	video_unregister_device(&cru->vdev);
}
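
/*
 * If the video device has already been registered, only make sure its media
 * entity is attached to the media device before returning; otherwise perform
 * the full video and media device registration.
 */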
int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru)
{
	struct video_device *vdev = &cru->vdev;
	int ret;

	if (video_is_registered(&cru->vdev)) {
		struct media_entity *entity;

		entity = &cru->vdev.entity;
		if (!entity->graph_obj.mdev)
			entity->graph_obj.mdev = &cru->mdev;
		return 0;
	}

	rzg2l_cru_v4l2_init(cru);
	video_set_drvdata(vdev, cru);
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(cru->dev, "Failed to register video device\n");
		return ret;
	}

	ret = media_device_register(&cru->mdev);
	if (ret) {
		video_unregister_device(&cru->vdev);
		return ret;
	}

	return 0;
}