// SPDX-License-Identifier: GPL-2.0
/*
 * PiSP Back End driver.
 * Copyright (c) 2021-2024 Raspberry Pi Limited.
 *
 */
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>

#include <uapi/linux/media/raspberrypi/pisp_be_config.h>

#include "pisp_be_formats.h"

/* Maximum number of config buffers possible */
#define PISP_BE_NUM_CONFIG_BUFFERS	VB2_MAX_FRAME

#define PISPBE_NAME			"pispbe"

/* Some ISP-BE registers */
#define PISP_BE_VERSION_REG		0x0
#define PISP_BE_CONTROL_REG		0x4
#define PISP_BE_CONTROL_COPY_CONFIG	BIT(1)
#define PISP_BE_CONTROL_QUEUE_JOB	BIT(0)
#define PISP_BE_CONTROL_NUM_TILES(n)	((n) << 16)
#define PISP_BE_TILE_ADDR_LO_REG	0x8
#define PISP_BE_TILE_ADDR_HI_REG	0xc
#define PISP_BE_STATUS_REG		0x10
#define PISP_BE_STATUS_QUEUED		BIT(0)
#define PISP_BE_BATCH_STATUS_REG	0x14
#define PISP_BE_INTERRUPT_EN_REG	0x18
#define PISP_BE_INTERRUPT_STATUS_REG	0x1c
#define PISP_BE_AXI_REG			0x20
#define PISP_BE_CONFIG_BASE_REG		0x40
#define PISP_BE_IO_ADDR_LOW(n)		(PISP_BE_CONFIG_BASE_REG + 8 * (n))
#define PISP_BE_IO_ADDR_HIGH(n)		(PISP_BE_IO_ADDR_LOW((n)) + 4)
#define PISP_BE_GLOBAL_BAYER_ENABLE	0xb0
#define PISP_BE_GLOBAL_RGB_ENABLE	0xb4
#define N_HW_ADDRESSES			13
#define N_HW_ENABLES			2
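/*
 * For reference, the N_HW_ADDRESSES slots are filled in by
 * pispbe_xlate_addrs() below roughly as follows (derived from how the
 * indices are used there): slots 0-2 hold the main input image planes,
 * 3 and 4 the TDN and Stitch inputs, 5 and 6 the TDN and Stitch outputs,
 * and 7-12 the (up to) three planes of each of the two main image outputs.
 */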

#define PISP_BE_VERSION_2712		0x02252700
#define PISP_BE_VERSION_MINOR_BITS	0xf

/*
 * This maps our nodes onto the inputs/outputs of the actual PiSP Back End.
 * Be wary of the word "OUTPUT" which is used ambiguously here. In a V4L2
 * context it means an input to the hardware (source image or metadata).
 * Elsewhere it means an output from the hardware.
 */
enum pispbe_node_ids {
	MAIN_INPUT_NODE,
	TDN_INPUT_NODE,
	STITCH_INPUT_NODE,
	OUTPUT0_NODE,
	OUTPUT1_NODE,
	TDN_OUTPUT_NODE,
	STITCH_OUTPUT_NODE,
	CONFIG_NODE,
	PISPBE_NUM_NODES
};

struct pispbe_node_description {
	const char *ent_name;
	enum v4l2_buf_type buf_type;
	unsigned int caps;
};

static const struct pispbe_node_description node_desc[PISPBE_NUM_NODES] = {
	/* MAIN_INPUT_NODE */
	{
		.ent_name = PISPBE_NAME "-input",
		.buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE,
	},
	/* TDN_INPUT_NODE */
	{
		.ent_name = PISPBE_NAME "-tdn_input",
		.buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE,
	},
	/* STITCH_INPUT_NODE */
	{
		.ent_name = PISPBE_NAME "-stitch_input",
		.buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE,
	},
	/* OUTPUT0_NODE */
	{
		.ent_name = PISPBE_NAME "-output0",
		.buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE,
	},
	/* OUTPUT1_NODE */
	{
		.ent_name = PISPBE_NAME "-output1",
		.buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE,
	},
	/* TDN_OUTPUT_NODE */
	{
		.ent_name = PISPBE_NAME "-tdn_output",
		.buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE,
	},
	/* STITCH_OUTPUT_NODE */
	{
		.ent_name = PISPBE_NAME "-stitch_output",
		.buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE,
	},
	/* CONFIG_NODE */
	{
		.ent_name = PISPBE_NAME "-config",
		.buf_type = V4L2_BUF_TYPE_META_OUTPUT,
		.caps = V4L2_CAP_META_OUTPUT,
	}
};

#define NODE_DESC_IS_OUTPUT(desc) ( \
	((desc)->buf_type == V4L2_BUF_TYPE_META_OUTPUT) || \
	((desc)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT) || \
	((desc)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))

#define NODE_IS_META(node) ( \
	((node)->buf_type == V4L2_BUF_TYPE_META_OUTPUT))
#define NODE_IS_OUTPUT(node) ( \
	((node)->buf_type == V4L2_BUF_TYPE_META_OUTPUT) || \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT) || \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
#define NODE_IS_CAPTURE(node) ( \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) || \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
#define NODE_IS_MPLANE(node) ( \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) || \
	((node)->buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))

/*
 * Structure to describe a single node /dev/video<N> which represents a single
 * input or output queue to the PiSP Back End device.
 */
struct pispbe_node {
	unsigned int id;
	int vfl_dir;
	enum v4l2_buf_type buf_type;
	struct video_device vfd;
	struct media_pad pad;
	struct media_intf_devnode *intf_devnode;
	struct media_link *intf_link;
	struct pispbe_dev *pispbe;
	/* Video device lock */
	struct mutex node_lock;
	/* vb2_queue lock */
	struct mutex queue_lock;
	/* Protect pispbe_node->ready_queue and pispbe_buffer->ready_list */
	spinlock_t ready_lock;
	struct list_head ready_queue;
	struct vb2_queue queue;
	struct v4l2_format format;
	const struct pisp_be_format *pisp_format;
};

/* For logging only, use the entity name with "pispbe" and separator removed */
#define NODE_NAME(node) \
	(node_desc[(node)->id].ent_name + sizeof(PISPBE_NAME))

/* Records details of the jobs currently running or queued on the h/w. */
struct pispbe_job {
	bool valid;
	/*
	 * An array of buffer pointers - remember it's source buffers first,
	 * then captures, then metadata last.
	 */
	struct pispbe_buffer *buf[PISPBE_NUM_NODES];
};

struct pispbe_hw_enables {
	u32 bayer_enables;
	u32 rgb_enables;
};

/* Records a job configuration and memory addresses. */
struct pispbe_job_descriptor {
	dma_addr_t hw_dma_addrs[N_HW_ADDRESSES];
	struct pisp_be_tiles_config *config;
	struct pispbe_hw_enables hw_enables;
	dma_addr_t tiles;
};

/*
 * Structure representing the entire PiSP Back End device, comprising several
 * nodes which share platform resources and a mutex for the actual HW.
 */
struct pispbe_dev {
	struct device *dev;
	struct pispbe_dev *pispbe;
	struct pisp_be_tiles_config *config;
	void __iomem *be_reg_base;
	struct clk *clk;
	struct v4l2_device v4l2_dev;
	struct v4l2_subdev sd;
	struct media_device mdev;
	struct media_pad pad[PISPBE_NUM_NODES]; /* output pads first */
	struct pispbe_node node[PISPBE_NUM_NODES];
	dma_addr_t config_dma_addr;
	unsigned int sequence;
	u32 streaming_map;
	struct pispbe_job queued_job, running_job;
	spinlock_t hw_lock; /* protects "hw_busy" flag and streaming_map */
	bool hw_busy; /* non-zero if a job is queued or is being started */
	int irq;
	u32 hw_version;
	u8 done, started;
};

static u32 pispbe_rd(struct pispbe_dev *pispbe, unsigned int offset)
{
	return readl(pispbe->be_reg_base + offset);
}

static void pispbe_wr(struct pispbe_dev *pispbe, unsigned int offset, u32 val)
{
	writel(val, pispbe->be_reg_base + offset);
}

/*
 * Queue a job to the h/w. If the h/w is idle it will begin immediately.
 * Caller must ensure it is "safe to queue", i.e. we don't already have a
 * queued, unstarted job.
 */
static void pispbe_queue_job(struct pispbe_dev *pispbe,
			     struct pispbe_job_descriptor *job)
{
	unsigned int begin, end;

	if (pispbe_rd(pispbe, PISP_BE_STATUS_REG) & PISP_BE_STATUS_QUEUED)
		dev_err(pispbe->dev, "ERROR: not safe to queue new job!\n");

	/*
	 * Write configuration to hardware. DMA addresses and enable flags
	 * are passed separately, because the driver needs to sanitize them,
	 * and we don't want to modify (or be vulnerable to modifications of)
	 * the mmap'd buffer.
	 */
	for (unsigned int u = 0; u < N_HW_ADDRESSES; ++u) {
		pispbe_wr(pispbe, PISP_BE_IO_ADDR_LOW(u),
			  lower_32_bits(job->hw_dma_addrs[u]));
		pispbe_wr(pispbe, PISP_BE_IO_ADDR_HIGH(u),
			  upper_32_bits(job->hw_dma_addrs[u]));
	}
	pispbe_wr(pispbe, PISP_BE_GLOBAL_BAYER_ENABLE,
		  job->hw_enables.bayer_enables);
	pispbe_wr(pispbe, PISP_BE_GLOBAL_RGB_ENABLE,
		  job->hw_enables.rgb_enables);

	/* Everything else is as supplied by the user. */
	begin = offsetof(struct pisp_be_config, global.bayer_order) /
		sizeof(u32);
	end = sizeof(struct pisp_be_config) / sizeof(u32);
	for (unsigned int u = begin; u < end; u++)
		pispbe_wr(pispbe, PISP_BE_CONFIG_BASE_REG + sizeof(u32) * u,
			  ((u32 *)job->config)[u]);

	/* Read back the addresses -- an error here could be fatal */
	for (unsigned int u = 0; u < N_HW_ADDRESSES; ++u) {
		unsigned int offset = PISP_BE_IO_ADDR_LOW(u);
		u64 along = pispbe_rd(pispbe, offset);

		along += ((u64)pispbe_rd(pispbe, offset + 4)) << 32;
		if (along != (u64)(job->hw_dma_addrs[u])) {
			dev_dbg(pispbe->dev,
				"ISP BE config error: check if ISP RAMs enabled?\n");
			return;
		}
	}

	/*
	 * Write tile pointer to hardware. The IOMMU should prevent
	 * out-of-bounds offsets reaching non-ISP buffers.
	 */
	pispbe_wr(pispbe, PISP_BE_TILE_ADDR_LO_REG, lower_32_bits(job->tiles));
	pispbe_wr(pispbe, PISP_BE_TILE_ADDR_HI_REG, upper_32_bits(job->tiles));

	/* Enqueue the job */
	pispbe_wr(pispbe, PISP_BE_CONTROL_REG,
		  PISP_BE_CONTROL_COPY_CONFIG | PISP_BE_CONTROL_QUEUE_JOB |
		  PISP_BE_CONTROL_NUM_TILES(job->config->num_tiles));
}

struct pispbe_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head ready_list;
	unsigned int config_index;
};
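
/*
 * Note on plane_factor below: it is a per-plane size ratio in 1/8ths of the
 * base (luma) plane, accumulated to turn a single dma_addr_t into the extra
 * plane addresses of a non-mplane format. As an illustrative example only
 * (the real factors live in pisp_be_formats.h): with factors of 8, 2 and 2
 * for a 4:2:0 planar format, plane 1 would start at addr[0] + size and
 * plane 2 at addr[0] + size * 10 / 8.
 */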
static int pispbe_get_planes_addr(dma_addr_t addr[3], struct pispbe_buffer *buf,
				  struct pispbe_node *node)
{
	unsigned int num_planes = node->format.fmt.pix_mp.num_planes;
	unsigned int plane_factor = 0;
	unsigned int size;
	unsigned int p;

	if (!buf || !node->pisp_format)
		return 0;

	/*
	 * Determine the base plane size. This will not be the same
	 * as node->format.fmt.pix_mp.plane_fmt[0].sizeimage for a single
	 * plane buffer in an mplane format.
	 */
	size = node->format.fmt.pix_mp.plane_fmt[0].bytesperline *
	       node->format.fmt.pix_mp.height;

	for (p = 0; p < num_planes && p < PISPBE_MAX_PLANES; p++) {
		addr[p] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, p);
		plane_factor += node->pisp_format->plane_factor[p];
	}

	for (; p < PISPBE_MAX_PLANES && node->pisp_format->plane_factor[p]; p++) {
		/*
		 * Calculate the address offset of this plane as needed
		 * by the hardware. This is specifically for non-mplane
		 * buffer formats, where there are 3 image planes, e.g.
		 * for the V4L2_PIX_FMT_YUV420 format.
		 */
		addr[p] = addr[0] + ((size * plane_factor) >> 3);
		plane_factor += node->pisp_format->plane_factor[p];
	}

	return num_planes;
}

static dma_addr_t pispbe_get_addr(struct pispbe_buffer *buf)
{
	if (buf)
		return vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);

	return 0;
}

static void pispbe_xlate_addrs(struct pispbe_dev *pispbe,
			       struct pispbe_job_descriptor *job,
			       struct pispbe_buffer *buf[PISPBE_NUM_NODES])
{
	struct pispbe_hw_enables *hw_en = &job->hw_enables;
	struct pisp_be_tiles_config *config = job->config;
	dma_addr_t *addrs = job->hw_dma_addrs;
	int ret;

	/* Take a copy of the "enable" bitmaps so we can modify them. */
	hw_en->bayer_enables = config->config.global.bayer_enables;
	hw_en->rgb_enables = config->config.global.rgb_enables;

	/*
	 * Main input first. There are 3 address pointers, corresponding to up
	 * to 3 planes.
	 */
	ret = pispbe_get_planes_addr(addrs, buf[MAIN_INPUT_NODE],
				     &pispbe->node[MAIN_INPUT_NODE]);
	if (ret <= 0) {
		/*
		 * This shouldn't happen; pispbe_schedule_internal should insist
		 * on an input.
		 */
		dev_warn(pispbe->dev, "ISP-BE missing input\n");
		hw_en->bayer_enables = 0;
		hw_en->rgb_enables = 0;
		return;
	}

	/*
	 * Now TDN/Stitch inputs and outputs. These are single-plane and only
	 * used with Bayer input. Input enables must match the requirements
	 * of the processing stages, otherwise the hardware can lock up!
	 */
	if (hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_INPUT) {
		addrs[3] = pispbe_get_addr(buf[TDN_INPUT_NODE]);
		if (addrs[3] == 0 ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_TDN_INPUT) ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_TDN) ||
		    (config->config.tdn.reset & 1)) {
			hw_en->bayer_enables &=
				~(PISP_BE_BAYER_ENABLE_TDN_INPUT |
				  PISP_BE_BAYER_ENABLE_TDN_DECOMPRESS);
			if (!(config->config.tdn.reset & 1))
				hw_en->bayer_enables &=
					~PISP_BE_BAYER_ENABLE_TDN;
		}

		addrs[4] = pispbe_get_addr(buf[STITCH_INPUT_NODE]);
		if (addrs[4] == 0 ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_STITCH_INPUT) ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_STITCH)) {
			hw_en->bayer_enables &=
				~(PISP_BE_BAYER_ENABLE_STITCH_INPUT |
				  PISP_BE_BAYER_ENABLE_STITCH_DECOMPRESS |
				  PISP_BE_BAYER_ENABLE_STITCH);
		}

		addrs[5] = pispbe_get_addr(buf[TDN_OUTPUT_NODE]);
		if (addrs[5] == 0)
			hw_en->bayer_enables &=
				~(PISP_BE_BAYER_ENABLE_TDN_COMPRESS |
				  PISP_BE_BAYER_ENABLE_TDN_OUTPUT);

		addrs[6] = pispbe_get_addr(buf[STITCH_OUTPUT_NODE]);
		if (addrs[6] == 0)
			hw_en->bayer_enables &=
				~(PISP_BE_BAYER_ENABLE_STITCH_COMPRESS |
				  PISP_BE_BAYER_ENABLE_STITCH_OUTPUT);
	} else {
		/* No Bayer input? Disable entire Bayer pipe (else lockup) */
		hw_en->bayer_enables = 0;
	}

	/* Main image output channels. */
	for (unsigned int i = 0; i < PISP_BACK_END_NUM_OUTPUTS; i++) {
		ret = pispbe_get_planes_addr(addrs + 7 + 3 * i,
					     buf[OUTPUT0_NODE + i],
					     &pispbe->node[OUTPUT0_NODE + i]);
		if (ret <= 0)
			hw_en->rgb_enables &= ~(PISP_BE_RGB_ENABLE_OUTPUT0 << i);
	}
}

/*
 * Prepare a job description to be submitted to the HW.
 *
 * To schedule a job, we need all streaming nodes (apart from Output0,
 * Output1, Tdn and Stitch) to have a buffer ready, which must
 * include at least a config buffer and a main input image.
 *
 * For Output0, Output1, Tdn and Stitch, a buffer only needs to be
 * available if the blocks are enabled in the config.
 *
 * Needs to be called with hw_lock held.
 *
 * Returns 0 if a job has been successfully prepared, < 0 otherwise.
 */
static int pispbe_prepare_job(struct pispbe_dev *pispbe,
			      struct pispbe_job_descriptor *job)
{
	struct pispbe_buffer *buf[PISPBE_NUM_NODES] = {};
	unsigned int config_index;
	struct pispbe_node *node;
	unsigned long flags;

	lockdep_assert_held(&pispbe->hw_lock);

	memset(job, 0, sizeof(struct pispbe_job_descriptor));

	if (((BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)) &
		pispbe->streaming_map) !=
			(BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)))
		return -ENODEV;

	node = &pispbe->node[CONFIG_NODE];
	spin_lock_irqsave(&node->ready_lock, flags);
	buf[CONFIG_NODE] = list_first_entry_or_null(&node->ready_queue,
						    struct pispbe_buffer,
						    ready_list);
	if (buf[CONFIG_NODE]) {
		list_del(&buf[CONFIG_NODE]->ready_list);
		pispbe->queued_job.buf[CONFIG_NODE] = buf[CONFIG_NODE];
	}
	spin_unlock_irqrestore(&node->ready_lock, flags);

	/* Exit early if no config buffer has been queued. */
	if (!buf[CONFIG_NODE])
		return -ENODEV;

	config_index = buf[CONFIG_NODE]->vb.vb2_buf.index;
	job->config = &pispbe->config[config_index];
	job->tiles = pispbe->config_dma_addr +
		     config_index * sizeof(struct pisp_be_tiles_config) +
		     offsetof(struct pisp_be_tiles_config, tiles);

	/* remember: srcimages, captures then metadata */
	for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++) {
		unsigned int bayer_en =
			job->config->config.global.bayer_enables;
		unsigned int rgb_en =
			job->config->config.global.rgb_enables;
		bool ignore_buffers = false;

		/* Config node is handled outside the loop above. */
		if (i == CONFIG_NODE)
			continue;

		buf[i] = NULL;
		if (!(pispbe->streaming_map & BIT(i)))
			continue;

		if ((!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT0) &&
		     i == OUTPUT0_NODE) ||
		    (!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT1) &&
		     i == OUTPUT1_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_TDN_INPUT) &&
		     i == TDN_INPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_TDN_OUTPUT) &&
		     i == TDN_OUTPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_STITCH_INPUT) &&
		     i == STITCH_INPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_STITCH_OUTPUT) &&
		     i == STITCH_OUTPUT_NODE)) {
			/*
			 * Ignore Output0/Output1/Tdn/Stitch buffer check if the
			 * global enables aren't set for these blocks. If a
			 * buffer has been provided, we dequeue it back to the
			 * user with the other in-use buffers.
			 */
			ignore_buffers = true;
		}

		node = &pispbe->node[i];

		/* Pull a buffer from each V4L2 queue to form the queued job */
		spin_lock_irqsave(&node->ready_lock, flags);
		buf[i] = list_first_entry_or_null(&node->ready_queue,
						  struct pispbe_buffer,
						  ready_list);
		if (buf[i]) {
			list_del(&buf[i]->ready_list);
			pispbe->queued_job.buf[i] = buf[i];
		}
		spin_unlock_irqrestore(&node->ready_lock, flags);

		if (!buf[i] && !ignore_buffers)
			goto err_return_buffers;
	}

	pispbe->queued_job.valid = true;

	/* Convert buffers to DMA addresses for the hardware */
	pispbe_xlate_addrs(pispbe, job, buf);

	return 0;

err_return_buffers:
	for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++) {
		struct pispbe_node *n = &pispbe->node[i];

		if (!buf[i])
			continue;

		/* Return the buffer to the ready_list queue */
		spin_lock_irqsave(&n->ready_lock, flags);
		list_add(&buf[i]->ready_list, &n->ready_queue);
		spin_unlock_irqrestore(&n->ready_lock, flags);
	}

	memset(&pispbe->queued_job, 0, sizeof(pispbe->queued_job));

	return -ENODEV;
}
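
/*
 * Try to queue the next job to the hardware. clear_hw_busy is true when the
 * caller (the ISR) has just seen the hardware start a job, meaning another
 * one may now be queued behind it; all other callers pass false and will
 * only submit a job if the hardware is currently idle.
 */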
static void pispbe_schedule(struct pispbe_dev *pispbe, bool clear_hw_busy)
{
	struct pispbe_job_descriptor job;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pispbe->hw_lock, flags);

	if (clear_hw_busy)
		pispbe->hw_busy = false;

	if (pispbe->hw_busy)
		goto unlock_and_return;

	ret = pispbe_prepare_job(pispbe, &job);
	if (ret)
		goto unlock_and_return;

	/*
	 * We can kick the job off without the hw_lock, as this can
	 * never run again until hw_busy is cleared, which will happen
	 * only when the following job has been queued and an interrupt
	 * is raised.
	 */
	pispbe->hw_busy = true;
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);

	if (job.config->num_tiles <= 0 ||
	    job.config->num_tiles > PISP_BACK_END_NUM_TILES ||
	    !((job.hw_enables.bayer_enables | job.hw_enables.rgb_enables) &
	      PISP_BE_BAYER_ENABLE_INPUT)) {
		/*
		 * Bad job. We can't let it proceed as it could lock up
		 * the hardware, or worse!
		 *
		 * For now, just force num_tiles to 0, which causes the
		 * H/W to do something bizarre but survivable. It
		 * increments (started,done) counters by more than 1,
		 * but we seem to survive...
		 */
		dev_dbg(pispbe->dev, "Bad job: invalid number of tiles: %u\n",
			job.config->num_tiles);
		job.config->num_tiles = 0;
	}

	pispbe_queue_job(pispbe, &job);

	return;

unlock_and_return:
	/* No job has been queued, just release the lock and return. */
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);
}

static void pispbe_isr_jobdone(struct pispbe_dev *pispbe,
			       struct pispbe_job *job)
{
	struct pispbe_buffer **buf = job->buf;
	u64 ts = ktime_get_ns();

	for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++) {
		if (buf[i]) {
			buf[i]->vb.vb2_buf.timestamp = ts;
			buf[i]->vb.sequence = pispbe->sequence;
			vb2_buffer_done(&buf[i]->vb.vb2_buf,
					VB2_BUF_STATE_DONE);
		}
	}

	pispbe->sequence++;
}

static irqreturn_t pispbe_isr(int irq, void *dev)
{
	struct pispbe_dev *pispbe = (struct pispbe_dev *)dev;
	bool can_queue_another = false;
	u8 started, done;
	u32 u;

	u = pispbe_rd(pispbe, PISP_BE_INTERRUPT_STATUS_REG);
	if (u == 0)
		return IRQ_NONE;

	pispbe_wr(pispbe, PISP_BE_INTERRUPT_STATUS_REG, u);
	u = pispbe_rd(pispbe, PISP_BE_BATCH_STATUS_REG);
	done = (uint8_t)u;
	started = (uint8_t)(u >> 8);

	/*
	 * Be aware that done can go up by 2 and started by 1 when: a job that
	 * we previously saw "start" now finishes, and we then queued a new job
	 * which we see both start and finish "simultaneously".
	 */
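	/*
	 * Worked example of that case: job A was seen to start earlier and is
	 * in running_job, while job B sits in queued_job. This interrupt then
	 * reports "done" advanced by 2 and "started" by 1: the first branch
	 * below completes A, and the "started" branch both completes B and
	 * clears queued_job, bringing the driver's counters back in step.
	 */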
	if (pispbe->running_job.valid && pispbe->done != done) {
		pispbe_isr_jobdone(pispbe, &pispbe->running_job);
		memset(&pispbe->running_job, 0, sizeof(pispbe->running_job));
		pispbe->done++;
	}

	if (pispbe->started != started) {
		pispbe->started++;
		can_queue_another = 1;

		if (pispbe->done != done && pispbe->queued_job.valid) {
			pispbe_isr_jobdone(pispbe, &pispbe->queued_job);
			pispbe->done++;
		} else {
			pispbe->running_job = pispbe->queued_job;
		}

		memset(&pispbe->queued_job, 0, sizeof(pispbe->queued_job));
	}

	if (pispbe->done != done || pispbe->started != started) {
		dev_dbg(pispbe->dev,
			"Job counters not matching: done = %u, expected %u - started = %u, expected %u\n",
			pispbe->done, done, pispbe->started, started);
		pispbe->started = started;
		pispbe->done = done;
	}

	/* check if there's more to do before going to sleep */
	pispbe_schedule(pispbe, can_queue_another);

	return IRQ_HANDLED;
}

static int pisp_be_validate_config(struct pispbe_dev *pispbe,
				   struct pisp_be_tiles_config *config)
{
	u32 bayer_enables = config->config.global.bayer_enables;
	u32 rgb_enables = config->config.global.rgb_enables;
	struct device *dev = pispbe->dev;
	struct v4l2_format *fmt;
	unsigned int bpl, size;

	if (!(bayer_enables & PISP_BE_BAYER_ENABLE_INPUT) ==
	    !(rgb_enables & PISP_BE_RGB_ENABLE_INPUT)) {
		dev_dbg(dev, "%s: Not exactly one input enabled\n", __func__);
		return -EIO;
	}

	/* Ensure output config strides and buffer sizes match the V4L2 formats. */
	fmt = &pispbe->node[TDN_OUTPUT_NODE].format;
	if (bayer_enables & PISP_BE_BAYER_ENABLE_TDN_OUTPUT) {
		bpl = config->config.tdn_output_format.stride;
		size = bpl * config->config.tdn_output_format.height;

		if (fmt->fmt.pix_mp.plane_fmt[0].bytesperline < bpl) {
			dev_dbg(dev, "%s: bpl mismatch on tdn_output\n",
				__func__);
			return -EINVAL;
		}

		if (fmt->fmt.pix_mp.plane_fmt[0].sizeimage < size) {
			dev_dbg(dev, "%s: size mismatch on tdn_output\n",
				__func__);
			return -EINVAL;
		}
	}

	fmt = &pispbe->node[STITCH_OUTPUT_NODE].format;
	if (bayer_enables & PISP_BE_BAYER_ENABLE_STITCH_OUTPUT) {
		bpl = config->config.stitch_output_format.stride;
		size = bpl * config->config.stitch_output_format.height;

		if (fmt->fmt.pix_mp.plane_fmt[0].bytesperline < bpl) {
			dev_dbg(dev, "%s: bpl mismatch on stitch_output\n",
				__func__);
			return -EINVAL;
		}

		if (fmt->fmt.pix_mp.plane_fmt[0].sizeimage < size) {
			dev_dbg(dev, "%s: size mismatch on stitch_output\n",
				__func__);
			return -EINVAL;
		}
	}

	for (unsigned int j = 0; j < PISP_BACK_END_NUM_OUTPUTS; j++) {
		if (!(rgb_enables & PISP_BE_RGB_ENABLE_OUTPUT(j)))
			continue;

		if (config->config.output_format[j].image.format &
		    PISP_IMAGE_FORMAT_WALLPAPER_ROLL)
			continue; /* TODO: Size checks for wallpaper formats */

		fmt = &pispbe->node[OUTPUT0_NODE + j].format;
		for (unsigned int i = 0; i < fmt->fmt.pix_mp.num_planes; i++) {
			bpl = !i ? config->config.output_format[j].image.stride
			    : config->config.output_format[j].image.stride2;
			size = bpl * config->config.output_format[j].image.height;

			if (config->config.output_format[j].image.format &
			    PISP_IMAGE_FORMAT_SAMPLING_420)
				size >>= 1;

			if (fmt->fmt.pix_mp.plane_fmt[i].bytesperline < bpl) {
				dev_dbg(dev, "%s: bpl mismatch on output %d\n",
					__func__, j);
				return -EINVAL;
			}

			if (fmt->fmt.pix_mp.plane_fmt[i].sizeimage < size) {
				dev_dbg(dev, "%s: size mismatch on output\n",
					__func__);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int pispbe_node_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
				   unsigned int *nplanes, unsigned int sizes[],
				   struct device *alloc_devs[])
{
	struct pispbe_node *node = vb2_get_drv_priv(q);
	struct pispbe_dev *pispbe = node->pispbe;
	unsigned int num_planes = NODE_IS_MPLANE(node) ?
				  node->format.fmt.pix_mp.num_planes : 1;

	if (*nplanes) {
		if (*nplanes != num_planes)
			return -EINVAL;

		for (unsigned int i = 0; i < *nplanes; i++) {
			unsigned int size = NODE_IS_MPLANE(node) ?
				node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
				node->format.fmt.meta.buffersize;

			if (sizes[i] < size)
				return -EINVAL;
		}

		return 0;
	}

	*nplanes = num_planes;
	for (unsigned int i = 0; i < *nplanes; i++) {
		unsigned int size = NODE_IS_MPLANE(node) ?
			node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
			node->format.fmt.meta.buffersize;
		sizes[i] = size;
	}

	dev_dbg(pispbe->dev,
		"Image (or metadata) size %u, nbuffers %u for node %s\n",
		sizes[0], *nbuffers, NODE_NAME(node));

	return 0;
}
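
/*
 * Note that for the config node the whole buffer is copied here into the
 * driver's own DMA-coherent config array and validated at prepare time, so
 * the job submitted to the hardware always uses that validated copy rather
 * than the mmap'd vb2 buffer (see also the comment in pispbe_queue_job()).
 */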
static int pispbe_node_buffer_prepare(struct vb2_buffer *vb)
{
	struct pispbe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct pispbe_dev *pispbe = node->pispbe;
	unsigned int num_planes = NODE_IS_MPLANE(node) ?
				  node->format.fmt.pix_mp.num_planes : 1;

	for (unsigned int i = 0; i < num_planes; i++) {
		unsigned long size = NODE_IS_MPLANE(node) ?
			node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
			node->format.fmt.meta.buffersize;

		if (vb2_plane_size(vb, i) < size) {
			dev_dbg(pispbe->dev,
				"data will not fit into plane %d (%lu < %lu)\n",
				i, vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, i, size);
	}

	if (node->id == CONFIG_NODE) {
		void *dst = &node->pispbe->config[vb->index];
		void *src = vb2_plane_vaddr(vb, 0);

		memcpy(dst, src, sizeof(struct pisp_be_tiles_config));

		return pisp_be_validate_config(pispbe, dst);
	}

	return 0;
}

static void pispbe_node_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf =
		container_of(buf, struct vb2_v4l2_buffer, vb2_buf);
	struct pispbe_buffer *buffer =
		container_of(vbuf, struct pispbe_buffer, vb);
	struct pispbe_node *node = vb2_get_drv_priv(buf->vb2_queue);
	struct pispbe_dev *pispbe = node->pispbe;
	unsigned long flags;

	dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
	spin_lock_irqsave(&node->ready_lock, flags);
	list_add_tail(&buffer->ready_list, &node->ready_queue);
	spin_unlock_irqrestore(&node->ready_lock, flags);

	/*
	 * Every time we add a buffer, check if there's now some work for the hw
	 * to do.
	 */
	pispbe_schedule(pispbe, false);
}

static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct pispbe_node *node = vb2_get_drv_priv(q);
	struct pispbe_dev *pispbe = node->pispbe;
	struct pispbe_buffer *buf, *tmp;
	unsigned long flags;
	int ret;

	ret = pm_runtime_resume_and_get(pispbe->dev);
	if (ret < 0)
		goto err_return_buffers;

	spin_lock_irqsave(&pispbe->hw_lock, flags);
	node->pispbe->streaming_map |= BIT(node->id);
	node->pispbe->sequence = 0;
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);

	dev_dbg(pispbe->dev, "%s: for node %s (count %u)\n",
		__func__, NODE_NAME(node), count);
	dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n",
		node->pispbe->streaming_map);

	/* Maybe we're ready to run. */
	pispbe_schedule(pispbe, false);

	return 0;

err_return_buffers:
	spin_lock_irqsave(&pispbe->hw_lock, flags);
	list_for_each_entry_safe(buf, tmp, &node->ready_queue, ready_list) {
		list_del(&buf->ready_list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);

	return ret;
}

static void pispbe_node_stop_streaming(struct vb2_queue *q)
{
	struct pispbe_node *node = vb2_get_drv_priv(q);
	struct pispbe_dev *pispbe = node->pispbe;
	struct pispbe_buffer *buf;
	unsigned long flags;

	/*
	 * Now this is a bit awkward. In a simple M2M device we could just wait
	 * for all queued jobs to complete, but here there's a risk that a
	 * partial set of buffers was queued and cannot be run. For now, just
	 * cancel all buffers stuck in the "ready queue", then wait for any
	 * running job.
	 *
	 * This may return buffers out of order.
	 */
	dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
	spin_lock_irqsave(&pispbe->hw_lock, flags);
	do {
		unsigned long flags1;

		spin_lock_irqsave(&node->ready_lock, flags1);
		buf = list_first_entry_or_null(&node->ready_queue,
					       struct pispbe_buffer,
					       ready_list);
		if (buf) {
			list_del(&buf->ready_list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		}
		spin_unlock_irqrestore(&node->ready_lock, flags1);
	} while (buf);
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);

	vb2_wait_for_all_buffers(&node->queue);

	spin_lock_irqsave(&pispbe->hw_lock, flags);
	pispbe->streaming_map &= ~BIT(node->id);
	spin_unlock_irqrestore(&pispbe->hw_lock, flags);

	pm_runtime_mark_last_busy(pispbe->dev);
	pm_runtime_put_autosuspend(pispbe->dev);

	dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n",
		pispbe->streaming_map);
}

static const struct vb2_ops pispbe_node_queue_ops = {
	.queue_setup = pispbe_node_queue_setup,
	.buf_prepare = pispbe_node_buffer_prepare,
	.buf_queue = pispbe_node_buffer_queue,
	.start_streaming = pispbe_node_start_streaming,
	.stop_streaming = pispbe_node_stop_streaming,
};

static const struct v4l2_file_operations pispbe_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap
};

static int pispbe_node_querycap(struct file *file, void *priv,
				struct v4l2_capability *cap)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	strscpy(cap->driver, PISPBE_NAME, sizeof(cap->driver));
	strscpy(cap->card, PISPBE_NAME, sizeof(cap->card));

	dev_dbg(pispbe->dev, "Caps for node %s: %x and %x (dev %x)\n",
		NODE_NAME(node), cap->capabilities, cap->device_caps,
		node->vfd.device_caps);

	return 0;
}

static int pispbe_node_g_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (!NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
		dev_dbg(pispbe->dev,
			"Cannot get capture fmt for output node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	*f = node->format;
	dev_dbg(pispbe->dev, "Get capture format for node %s\n",
		NODE_NAME(node));

	return 0;
}

static int pispbe_node_g_fmt_vid_out(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
		dev_dbg(pispbe->dev,
			"Cannot get output fmt for capture node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	*f = node->format;
	dev_dbg(pispbe->dev, "Get output format for node %s\n",
		NODE_NAME(node));

	return 0;
}

static int pispbe_node_g_fmt_meta_out(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
		dev_dbg(pispbe->dev,
			"Cannot get meta output fmt for non-meta node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	*f = node->format;
	dev_dbg(pispbe->dev, "Get output format for meta node %s\n",
		NODE_NAME(node));

	return 0;
}

static const struct pisp_be_format *pispbe_find_fmt(unsigned int fourcc)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(supported_formats); i++) {
		if (supported_formats[i].fourcc == fourcc)
			return &supported_formats[i];
	}

	return NULL;
}

static void pispbe_set_plane_params(struct v4l2_format *f,
				    const struct pisp_be_format *fmt)
{
	unsigned int nplanes = f->fmt.pix_mp.num_planes;
	unsigned int total_plane_factor = 0;

	for (unsigned int i = 0; i < PISPBE_MAX_PLANES; i++)
		total_plane_factor += fmt->plane_factor[i];

	for (unsigned int i = 0; i < nplanes; i++) {
		struct v4l2_plane_pix_format *p = &f->fmt.pix_mp.plane_fmt[i];
		unsigned int bpl, plane_size;

		bpl = (f->fmt.pix_mp.width * fmt->bit_depth) >> 3;
		bpl = ALIGN(max(p->bytesperline, bpl), fmt->align);

		plane_size = bpl * f->fmt.pix_mp.height *
		      (nplanes > 1 ? fmt->plane_factor[i] : total_plane_factor);
		/*
		 * The shift is to divide out the plane_factor fixed point
		 * scaling of 8.
		 */
		plane_size = max(p->sizeimage, plane_size >> 3);

		p->bytesperline = bpl;
		p->sizeimage = plane_size;
	}
}

static void pispbe_try_format(struct v4l2_format *f, struct pispbe_node *node)
{
	struct pispbe_dev *pispbe = node->pispbe;
	u32 pixfmt = f->fmt.pix_mp.pixelformat;
	const struct pisp_be_format *fmt;
	bool is_rgb;

	dev_dbg(pispbe->dev,
		"%s: [%s] req %ux%u %p4cc, planes %d\n",
		__func__, NODE_NAME(node), f->fmt.pix_mp.width,
		f->fmt.pix_mp.height, &pixfmt,
		f->fmt.pix_mp.num_planes);

	fmt = pispbe_find_fmt(pixfmt);
	if (!fmt) {
		dev_dbg(pispbe->dev,
			"%s: [%s] Format not found, defaulting to YUV420\n",
			__func__, NODE_NAME(node));
		fmt = pispbe_find_fmt(V4L2_PIX_FMT_YUV420);
	}

	f->fmt.pix_mp.pixelformat = fmt->fourcc;
	f->fmt.pix_mp.num_planes = fmt->num_planes;
	f->fmt.pix_mp.field = V4L2_FIELD_NONE;
	f->fmt.pix_mp.width = max(min(f->fmt.pix_mp.width, 65536u),
				  PISP_BACK_END_MIN_TILE_WIDTH);
	f->fmt.pix_mp.height = max(min(f->fmt.pix_mp.height, 65536u),
				   PISP_BACK_END_MIN_TILE_HEIGHT);

	/*
	 * Fill in the actual colour space when the requested one was
	 * not supported. This also catches the case when the "default"
	 * colour space was requested (as that's never in the mask).
	 */
	if (!(V4L2_COLORSPACE_MASK(f->fmt.pix_mp.colorspace) &
	      fmt->colorspace_mask))
		f->fmt.pix_mp.colorspace = fmt->colorspace_default;

	/* In all cases, we only support the defaults for these: */
	f->fmt.pix_mp.ycbcr_enc =
		V4L2_MAP_YCBCR_ENC_DEFAULT(f->fmt.pix_mp.colorspace);
	f->fmt.pix_mp.xfer_func =
		V4L2_MAP_XFER_FUNC_DEFAULT(f->fmt.pix_mp.colorspace);

	is_rgb = f->fmt.pix_mp.colorspace == V4L2_COLORSPACE_SRGB;
	f->fmt.pix_mp.quantization =
		V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, f->fmt.pix_mp.colorspace,
					      f->fmt.pix_mp.ycbcr_enc);

	/* Set plane size and bytes/line for each plane. */
	pispbe_set_plane_params(f, fmt);

	for (unsigned int i = 0; i < f->fmt.pix_mp.num_planes; i++) {
		dev_dbg(pispbe->dev,
			"%s: [%s] calc plane %d, %ux%u, depth %u, bpl %u size %u\n",
			__func__, NODE_NAME(node), i, f->fmt.pix_mp.width,
			f->fmt.pix_mp.height, fmt->bit_depth,
			f->fmt.pix_mp.plane_fmt[i].bytesperline,
			f->fmt.pix_mp.plane_fmt[i].sizeimage);
	}
}

static int pispbe_node_try_fmt_vid_cap(struct file *file, void *priv,
				       struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (!NODE_IS_CAPTURE(node) || NODE_IS_META(node)) {
		dev_dbg(pispbe->dev,
			"Cannot set capture fmt for output node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	pispbe_try_format(f, node);

	return 0;
}

static int pispbe_node_try_fmt_vid_out(struct file *file, void *priv,
				       struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (!NODE_IS_OUTPUT(node) || NODE_IS_META(node)) {
		dev_dbg(pispbe->dev,
			"Cannot set output fmt for capture node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	pispbe_try_format(f, node);

	return 0;
}

static int pispbe_node_try_fmt_meta_out(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
		dev_dbg(pispbe->dev,
			"Cannot set meta output fmt for non-meta node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	f->fmt.meta.dataformat = V4L2_META_FMT_RPI_BE_CFG;
	f->fmt.meta.buffersize = sizeof(struct pisp_be_tiles_config);

	return 0;
}

static int pispbe_node_s_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;
	int ret;

	ret = pispbe_node_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	if (vb2_is_busy(&node->queue))
		return -EBUSY;

	node->format = *f;
	node->pisp_format = pispbe_find_fmt(f->fmt.pix_mp.pixelformat);

	dev_dbg(pispbe->dev, "Set capture format for node %s to %p4cc\n",
		NODE_NAME(node), &f->fmt.pix_mp.pixelformat);

	return 0;
}

static int pispbe_node_s_fmt_vid_out(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;
	int ret;

	ret = pispbe_node_try_fmt_vid_out(file, priv, f);
	if (ret < 0)
		return ret;

	if (vb2_is_busy(&node->queue))
		return -EBUSY;

	node->format = *f;
	node->pisp_format = pispbe_find_fmt(f->fmt.pix_mp.pixelformat);

	dev_dbg(pispbe->dev, "Set output format for node %s to %p4cc\n",
		NODE_NAME(node), &f->fmt.pix_mp.pixelformat);

	return 0;
}

static int pispbe_node_s_fmt_meta_out(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;
	int ret;

	ret = pispbe_node_try_fmt_meta_out(file, priv, f);
	if (ret < 0)
		return ret;

	if (vb2_is_busy(&node->queue))
		return -EBUSY;

	node->format = *f;
	node->pisp_format = &meta_out_supported_formats[0];

	dev_dbg(pispbe->dev, "Set output format for meta node %s to %p4cc\n",
		NODE_NAME(node), &f->fmt.meta.dataformat);

	return 0;
}

static int pispbe_node_enum_fmt(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	struct pispbe_node *node = video_drvdata(file);

	if (f->type != node->queue.type)
		return -EINVAL;

	if (NODE_IS_META(node)) {
		if (f->index)
			return -EINVAL;

		f->pixelformat = V4L2_META_FMT_RPI_BE_CFG;
		f->flags = 0;
		return 0;
	}

	if (f->index >= ARRAY_SIZE(supported_formats))
		return -EINVAL;

	f->pixelformat = supported_formats[f->index].fourcc;
	f->flags = 0;

	return 0;
}

static int pispbe_enum_framesizes(struct file *file, void *priv,
				  struct v4l2_frmsizeenum *fsize)
{
	struct pispbe_node *node = video_drvdata(file);
	struct pispbe_dev *pispbe = node->pispbe;

	if (NODE_IS_META(node) || fsize->index)
		return -EINVAL;

	if (!pispbe_find_fmt(fsize->pixel_format)) {
		dev_dbg(pispbe->dev, "Invalid pixel code: %x\n",
			fsize->pixel_format);
		return -EINVAL;
	}

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = 32;
	fsize->stepwise.max_width = 65535;
	fsize->stepwise.step_width = 2;

	fsize->stepwise.min_height = 32;
	fsize->stepwise.max_height = 65535;
	fsize->stepwise.step_height = 2;

	return 0;
}

static const struct v4l2_ioctl_ops pispbe_node_ioctl_ops = {
	.vidioc_querycap = pispbe_node_querycap,
	.vidioc_g_fmt_vid_cap_mplane = pispbe_node_g_fmt_vid_cap,
	.vidioc_g_fmt_vid_out_mplane = pispbe_node_g_fmt_vid_out,
	.vidioc_g_fmt_meta_out = pispbe_node_g_fmt_meta_out,
	.vidioc_try_fmt_vid_cap_mplane = pispbe_node_try_fmt_vid_cap,
	.vidioc_try_fmt_vid_out_mplane = pispbe_node_try_fmt_vid_out,
	.vidioc_try_fmt_meta_out = pispbe_node_try_fmt_meta_out,
	.vidioc_s_fmt_vid_cap_mplane = pispbe_node_s_fmt_vid_cap,
	.vidioc_s_fmt_vid_out_mplane = pispbe_node_s_fmt_vid_out,
	.vidioc_s_fmt_meta_out = pispbe_node_s_fmt_meta_out,
	.vidioc_enum_fmt_vid_cap = pispbe_node_enum_fmt,
	.vidioc_enum_fmt_vid_out = pispbe_node_enum_fmt,
	.vidioc_enum_fmt_meta_out = pispbe_node_enum_fmt,
	.vidioc_enum_framesizes = pispbe_enum_framesizes,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

static const struct video_device pispbe_videodev = {
	.name = PISPBE_NAME,
	.vfl_dir = VFL_DIR_M2M, /* gets overwritten */
	.fops = &pispbe_fops,
	.ioctl_ops = &pispbe_node_ioctl_ops,
	.minor = -1,
	.release = video_device_release_empty,
};

static void pispbe_node_def_fmt(struct pispbe_node *node)
{
	if (NODE_IS_META(node) && NODE_IS_OUTPUT(node)) {
		/* Config node */
		struct v4l2_format *f = &node->format;

		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_BE_CFG;
		f->fmt.meta.buffersize = sizeof(struct pisp_be_tiles_config);
		f->type = node->buf_type;
	} else {
		struct v4l2_format f = {
			.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV420,
			.fmt.pix_mp.width = 1920,
			.fmt.pix_mp.height = 1080,
			.type = node->buf_type,
		};
		pispbe_try_format(&f, node);
		node->format = f;
	}

	node->pisp_format = pispbe_find_fmt(node->format.fmt.pix_mp.pixelformat);
}

/*
 * Initialise a struct pispbe_node and register it as /dev/video<N>
 * to represent one of the PiSP Back End's input or output streams.
 */
static int pispbe_init_node(struct pispbe_dev *pispbe, unsigned int id)
{
	bool output = NODE_DESC_IS_OUTPUT(&node_desc[id]);
	struct pispbe_node *node = &pispbe->node[id];
	struct media_entity *entity = &node->vfd.entity;
	struct video_device *vdev = &node->vfd;
	struct vb2_queue *q = &node->queue;
	int ret;

	node->id = id;
	node->pispbe = pispbe;
	node->buf_type = node_desc[id].buf_type;

	mutex_init(&node->node_lock);
	mutex_init(&node->queue_lock);
	INIT_LIST_HEAD(&node->ready_queue);
	spin_lock_init(&node->ready_lock);

	node->format.type = node->buf_type;
	pispbe_node_def_fmt(node);

	q->type = node->buf_type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->mem_ops = &vb2_dma_contig_memops;
	q->drv_priv = node;
	q->ops = &pispbe_node_queue_ops;
	q->buf_struct_size = sizeof(struct pispbe_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = pispbe->dev;
	/* get V4L2 to handle node->queue locking */
	q->lock = &node->queue_lock;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(pispbe->dev, "vb2_queue_init failed\n");
		goto err_mutex_destroy;
	}

	*vdev = pispbe_videodev; /* default initialization */
	strscpy(vdev->name, node_desc[id].ent_name, sizeof(vdev->name));
	vdev->v4l2_dev = &pispbe->v4l2_dev;
	vdev->vfl_dir = output ? VFL_DIR_TX : VFL_DIR_RX;
	/* get V4L2 to serialise our ioctls */
	vdev->lock = &node->node_lock;
	vdev->queue = &node->queue;
	vdev->device_caps = V4L2_CAP_STREAMING | node_desc[id].caps;

	node->pad.flags = output ? MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(entity, 1, &node->pad);
	if (ret) {
		dev_err(pispbe->dev,
			"Failed to register media pads for %s device node\n",
			NODE_NAME(node));
		goto err_unregister_queue;
	}

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(pispbe->dev,
			"Failed to register video %s device node\n",
			NODE_NAME(node));
		goto err_unregister_queue;
	}
	video_set_drvdata(vdev, node);

	if (output)
		ret = media_create_pad_link(entity, 0, &pispbe->sd.entity,
					    id, MEDIA_LNK_FL_IMMUTABLE |
					    MEDIA_LNK_FL_ENABLED);
	else
		ret = media_create_pad_link(&pispbe->sd.entity, id, entity,
					    0, MEDIA_LNK_FL_IMMUTABLE |
					    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_unregister_video_dev;

	dev_dbg(pispbe->dev, "%s device node registered as /dev/video%d\n",
		NODE_NAME(node), node->vfd.num);

	return 0;

err_unregister_video_dev:
	video_unregister_device(&node->vfd);
err_unregister_queue:
	vb2_queue_release(&node->queue);
err_mutex_destroy:
	mutex_destroy(&node->node_lock);
	mutex_destroy(&node->queue_lock);
	return ret;
}

static const struct v4l2_subdev_pad_ops pispbe_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
};

static const struct v4l2_subdev_ops pispbe_sd_ops = {
	.pad = &pispbe_pad_ops,
};
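
/*
 * The media graph this sets up is a single "pispbe" subdevice with one pad
 * per node: V4L2 OUTPUT (and META_OUTPUT) nodes are sources feeding the
 * subdevice's sink pads, while capture nodes sink from its source pads,
 * with all links created immutable and enabled in pispbe_init_node() above.
 */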
static int pispbe_init_subdev(struct pispbe_dev *pispbe)
{
	struct v4l2_subdev *sd = &pispbe->sd;
	int ret;

	v4l2_subdev_init(sd, &pispbe_sd_ops);
	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
	sd->owner = THIS_MODULE;
	sd->dev = pispbe->dev;
	strscpy(sd->name, PISPBE_NAME, sizeof(sd->name));

	for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++)
		pispbe->pad[i].flags =
			NODE_DESC_IS_OUTPUT(&node_desc[i]) ?
			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&sd->entity, PISPBE_NUM_NODES,
				     pispbe->pad);
	if (ret)
		goto error;

	ret = v4l2_device_register_subdev(&pispbe->v4l2_dev, sd);
	if (ret)
		goto error;

	return 0;

error:
	media_entity_cleanup(&sd->entity);
	return ret;
}

static int pispbe_init_devices(struct pispbe_dev *pispbe)
{
	struct v4l2_device *v4l2_dev;
	struct media_device *mdev;
	unsigned int num_regist;
	int ret;

	/* Register v4l2_device and media_device */
	mdev = &pispbe->mdev;
	mdev->hw_revision = pispbe->hw_version;
	mdev->dev = pispbe->dev;
	strscpy(mdev->model, PISPBE_NAME, sizeof(mdev->model));
	media_device_init(mdev);

	v4l2_dev = &pispbe->v4l2_dev;
	v4l2_dev->mdev = &pispbe->mdev;
	strscpy(v4l2_dev->name, PISPBE_NAME, sizeof(v4l2_dev->name));

	ret = v4l2_device_register(pispbe->dev, v4l2_dev);
	if (ret)
		goto err_media_dev_cleanup;

	/* Register the PISPBE subdevice. */
	ret = pispbe_init_subdev(pispbe);
	if (ret)
		goto err_unregister_v4l2;

	/* Create device video nodes */
	for (num_regist = 0; num_regist < PISPBE_NUM_NODES; num_regist++) {
		ret = pispbe_init_node(pispbe, num_regist);
		if (ret)
			goto err_unregister_nodes;
	}

	ret = media_device_register(mdev);
	if (ret)
		goto err_unregister_nodes;

	pispbe->config =
		dma_alloc_coherent(pispbe->dev,
				   sizeof(struct pisp_be_tiles_config) *
					PISP_BE_NUM_CONFIG_BUFFERS,
				   &pispbe->config_dma_addr, GFP_KERNEL);
	if (!pispbe->config) {
		dev_err(pispbe->dev, "Unable to allocate cached config buffers.\n");
		ret = -ENOMEM;
		goto err_unregister_mdev;
	}

	return 0;

err_unregister_mdev:
	media_device_unregister(mdev);
err_unregister_nodes:
	while (num_regist-- > 0) {
		video_unregister_device(&pispbe->node[num_regist].vfd);
		vb2_queue_release(&pispbe->node[num_regist].queue);
	}
	v4l2_device_unregister_subdev(&pispbe->sd);
	media_entity_cleanup(&pispbe->sd.entity);
err_unregister_v4l2:
	v4l2_device_unregister(v4l2_dev);
err_media_dev_cleanup:
	media_device_cleanup(mdev);
	return ret;
}

static void pispbe_destroy_devices(struct pispbe_dev *pispbe)
{
	if (pispbe->config) {
		dma_free_coherent(pispbe->dev,
				  sizeof(struct pisp_be_tiles_config) *
					PISP_BE_NUM_CONFIG_BUFFERS,
				  pispbe->config,
				  pispbe->config_dma_addr);
	}

	dev_dbg(pispbe->dev, "Unregister from media controller\n");

	v4l2_device_unregister_subdev(&pispbe->sd);
	media_entity_cleanup(&pispbe->sd.entity);
	media_device_unregister(&pispbe->mdev);

	for (int i = PISPBE_NUM_NODES - 1; i >= 0; i--) {
		video_unregister_device(&pispbe->node[i].vfd);
		vb2_queue_release(&pispbe->node[i].queue);
		mutex_destroy(&pispbe->node[i].node_lock);
		mutex_destroy(&pispbe->node[i].queue_lock);
	}

	media_device_cleanup(&pispbe->mdev);
	v4l2_device_unregister(&pispbe->v4l2_dev);
}

static int pispbe_runtime_suspend(struct device *dev)
{
	struct pispbe_dev *pispbe = dev_get_drvdata(dev);

	clk_disable_unprepare(pispbe->clk);

	return 0;
}

static int pispbe_runtime_resume(struct device *dev)
{
	struct pispbe_dev *pispbe = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(pispbe->clk);
	if (ret) {
		dev_err(dev, "Unable to enable clock\n");
		return ret;
	}

	dev_dbg(dev, "%s: Enabled clock, rate=%lu\n",
		__func__, clk_get_rate(pispbe->clk));

	return 0;
}

static int pispbe_hw_init(struct pispbe_dev *pispbe)
{
	u32 u;

	/* Check the HW is present and has a known version */
	u = pispbe_rd(pispbe, PISP_BE_VERSION_REG);
	dev_dbg(pispbe->dev, "pispbe_probe: HW version: 0x%08x", u);
	pispbe->hw_version = u;
	if ((u & ~PISP_BE_VERSION_MINOR_BITS) != PISP_BE_VERSION_2712)
		return -ENODEV;

	/* Clear leftover interrupts */
	pispbe_wr(pispbe, PISP_BE_INTERRUPT_STATUS_REG, 0xFFFFFFFFu);
	u = pispbe_rd(pispbe, PISP_BE_BATCH_STATUS_REG);
	dev_dbg(pispbe->dev, "pispbe_probe: BatchStatus: 0x%08x", u);

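	/*
	 * Seed the driver's done/started counters from the hardware's batch
	 * status so that the first interrupt after probe is interpreted
	 * against the correct baseline.
	 */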
	pispbe->done = (uint8_t)u;
	pispbe->started = (uint8_t)(u >> 8);
	u = pispbe_rd(pispbe, PISP_BE_STATUS_REG);
	dev_dbg(pispbe->dev, "pispbe_probe: Status: 0x%08x", u);

	if (u != 0 || pispbe->done != pispbe->started) {
		dev_err(pispbe->dev, "pispbe_probe: HW is stuck or busy\n");
		return -EBUSY;
	}

	/*
	 * AXI QOS=0, CACHE=4'b0010, PROT=3'b011
	 * Also set "chicken bits" 22:20 which enable sub-64-byte bursts
	 * and AXI AWID/BID variability (on versions which support this).
	 */
	pispbe_wr(pispbe, PISP_BE_AXI_REG, 0x32703200u);

	/* Enable both interrupt flags */
	pispbe_wr(pispbe, PISP_BE_INTERRUPT_EN_REG, 0x00000003u);

	return 0;
}

/* Probe the ISP-BE hardware block, as a single platform device. */
static int pispbe_probe(struct platform_device *pdev)
{
	struct pispbe_dev *pispbe;
	int ret;

	pispbe = devm_kzalloc(&pdev->dev, sizeof(*pispbe), GFP_KERNEL);
	if (!pispbe)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, pispbe);
	pispbe->dev = &pdev->dev;
	platform_set_drvdata(pdev, pispbe);

	pispbe->be_reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pispbe->be_reg_base)) {
		dev_err(&pdev->dev, "Failed to get ISP-BE registers address\n");
		return PTR_ERR(pispbe->be_reg_base);
	}

	pispbe->irq = platform_get_irq(pdev, 0);
	if (pispbe->irq <= 0)
		return -EINVAL;

	ret = devm_request_irq(&pdev->dev, pispbe->irq, pispbe_isr, 0,
			       PISPBE_NAME, pispbe);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request interrupt\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(pispbe->dev, DMA_BIT_MASK(36));
	if (ret)
		return ret;

	pispbe->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pispbe->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(pispbe->clk),
				     "Failed to get clock");

	/* Hardware initialisation */
	pm_runtime_set_autosuspend_delay(pispbe->dev, 200);
	pm_runtime_use_autosuspend(pispbe->dev);
	pm_runtime_enable(pispbe->dev);

	ret = pispbe_runtime_resume(pispbe->dev);
	if (ret)
		goto pm_runtime_disable_err;

	pispbe->hw_busy = false;
	spin_lock_init(&pispbe->hw_lock);
	ret = pispbe_hw_init(pispbe);
	if (ret)
		goto pm_runtime_suspend_err;

	ret = pispbe_init_devices(pispbe);
	if (ret)
		goto disable_devs_err;

	pm_runtime_mark_last_busy(pispbe->dev);
	pm_runtime_put_autosuspend(pispbe->dev);

	return 0;

disable_devs_err:
	pispbe_destroy_devices(pispbe);
pm_runtime_suspend_err:
	pispbe_runtime_suspend(pispbe->dev);
pm_runtime_disable_err:
	pm_runtime_dont_use_autosuspend(pispbe->dev);
	pm_runtime_disable(pispbe->dev);

	return ret;
}

static void pispbe_remove(struct platform_device *pdev)
{
	struct pispbe_dev *pispbe = platform_get_drvdata(pdev);

	pispbe_destroy_devices(pispbe);

	pispbe_runtime_suspend(pispbe->dev);
	pm_runtime_dont_use_autosuspend(pispbe->dev);
	pm_runtime_disable(pispbe->dev);
}

static const struct dev_pm_ops pispbe_pm_ops = {
	SET_RUNTIME_PM_OPS(pispbe_runtime_suspend, pispbe_runtime_resume, NULL)
};

static const struct of_device_id pispbe_of_match[] = {
	{
		.compatible = "raspberrypi,pispbe",
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, pispbe_of_match);

static struct platform_driver pispbe_pdrv = {
	.probe = pispbe_probe,
	.remove = pispbe_remove,
	.driver = {
		.name = PISPBE_NAME,
		.of_match_table = pispbe_of_match,
		.pm = &pispbe_pm_ops,
	},
};

module_platform_driver(pispbe_pdrv);

MODULE_DESCRIPTION("PiSP Back End driver");
MODULE_AUTHOR("David Plowman <david.plowman@raspberrypi.com>");
MODULE_AUTHOR("Nick Hollinghurst <nick.hollinghurst@raspberrypi.com>");
MODULE_LICENSE("GPL");