// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <media/ipu-bridge.h>
#include <media/media-device.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>

#include "ipu6-bus.h"
#include "ipu6-cpd.h"
#include "ipu6-isys.h"
#include "ipu6-isys-csi2.h"
#include "ipu6-mmu.h"
#include "ipu6-platform-buttress-regs.h"
#include "ipu6-platform-isys-csi2-reg.h"
#include "ipu6-platform-regs.h"

#define IPU6_BUTTRESS_FABIC_CONTROL		0x68
#define GDA_ENABLE_IWAKE_INDEX			2
#define GDA_IWAKE_THRESHOLD_INDEX		1
#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX	0
#define GDA_MEMOPEN_THRESHOLD_INDEX		3
#define DEFAULT_DID_RATIO			90
#define DEFAULT_IWAKE_THRESHOLD			0x42
#define DEFAULT_MEM_OPEN_TIME			10
#define ONE_THOUSAND_MICROSECOND		1000
/* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */
#define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE	0x800

/* LTR & DID values are 10 bits at most */
#define LTR_DID_VAL_MAX		1023
#define LTR_DEFAULT_VALUE	0x70503c19
#define FILL_TIME_DEFAULT_VALUE	0xfff0783c
#define LTR_DID_PKGC_2R		20
#define LTR_SCALE_DEFAULT	5
#define LTR_SCALE_1024NS	2
#define DID_SCALE_1US		2
#define DID_SCALE_32US		3
#define REG_PKGC_PMON_CFG	0xb00

#define VAL_PKGC_PMON_CFG_RESET	0x38
#define VAL_PKGC_PMON_CFG_START	0x7

#define IS_PIXEL_BUFFER_PAGES	0x80
/*
 * when iwake mode is disabled, the critical threshold is statically set
 * to 75% of the IS pixel buffer, criticalThreshold = (128 * 3) / 4
 */
#define CRITICAL_THRESHOLD_IWAKE_DISABLE	(IS_PIXEL_BUFFER_PAGES * 3 / 4)

union fabric_ctrl {
	struct {
		u16 ltr_val : 10;
		u16 ltr_scale : 3;
		u16 reserved : 3;
		u16 did_val : 10;
		u16 did_scale : 3;
		u16 reserved2 : 1;
		u16 keep_power_in_D0 : 1;
		u16 keep_power_override : 1;
	} bits;
	u32 value;
};

enum ltr_did_type {
	LTR_IWAKE_ON,
	LTR_IWAKE_OFF,
	LTR_ISYS_ON,
	LTR_ISYS_OFF,
	LTR_ENHANNCE_IWAKE,
	LTR_TYPE_MAX
};

#define ISYS_PM_QOS_VALUE	300

static int isys_isr_one(struct ipu6_bus_device *adev);

static int
isys_complete_ext_device_registration(struct ipu6_isys *isys,
				      struct v4l2_subdev *sd,
				      struct ipu6_isys_csi2_config *csi2)
{
	struct device *dev = &isys->adev->auxdev.dev;
	unsigned int i;
	int ret;

	for (i = 0; i < sd->entity.num_pads; i++) {
		if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			break;
	}

	if (i == sd->entity.num_pads) {
		dev_warn(dev, "no src pad in external entity\n");
		ret = -ENOENT;
		goto unregister_subdev;
	}

	ret = media_create_pad_link(&sd->entity, i,
				    &isys->csi2[csi2->port].asd.sd.entity,
				    0, MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_warn(dev, "can't create link\n");
		goto unregister_subdev;
	}

	isys->csi2[csi2->port].nlanes = csi2->nlanes;

	return 0;

unregister_subdev:
	v4l2_device_unregister_subdev(sd);

	return ret;
}

static void isys_stream_init(struct ipu6_isys *isys)
{
	u32 i;

	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) {
		mutex_init(&isys->streams[i].mutex);
		init_completion(&isys->streams[i].stream_open_completion);
		init_completion(&isys->streams[i].stream_close_completion);
		init_completion(&isys->streams[i].stream_start_completion);
		init_completion(&isys->streams[i].stream_stop_completion);
		INIT_LIST_HEAD(&isys->streams[i].queues);
		isys->streams[i].isys = isys;
		isys->streams[i].stream_handle = i;
		isys->streams[i].vc = INVALID_VC_ID;
	}
}

static void isys_csi2_unregister_subdevices(struct ipu6_isys *isys)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2 =
		&isys->pdata->ipdata->csi2;
	unsigned int i;

	for (i = 0; i < csi2->nports; i++)
		ipu6_isys_csi2_cleanup(&isys->csi2[i]);
}

static int isys_csi2_register_subdevices(struct ipu6_isys *isys)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
		&isys->pdata->ipdata->csi2;
	unsigned int i;
	int ret;

	for (i = 0; i < csi2_pdata->nports; i++) {
		ret = ipu6_isys_csi2_init(&isys->csi2[i], isys,
					  isys->pdata->base +
					  CSI_REG_PORT_BASE(i), i);
		if (ret)
			goto fail;

		isys->isr_csi2_bits |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);
	}

	return 0;

fail:
	while (i--)
		ipu6_isys_csi2_cleanup(&isys->csi2[i]);

	return ret;
}

static int isys_csi2_create_media_links(struct ipu6_isys *isys)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
		&isys->pdata->ipdata->csi2;
	struct device *dev = &isys->adev->auxdev.dev;
	unsigned int i, j;
	int ret;

	for (i = 0; i < csi2_pdata->nports; i++) {
		struct media_entity *sd = &isys->csi2[i].asd.sd.entity;

		for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) {
			struct ipu6_isys_video *av = &isys->csi2[i].av[j];

			ret = media_create_pad_link(sd, CSI2_PAD_SRC + j,
						    &av->vdev.entity, 0, 0);
			if (ret) {
				dev_err(dev, "CSI2 can't create link\n");
				return ret;
			}

			av->csi2 = &isys->csi2[i];
		}
	}

	return 0;
}

static void isys_unregister_video_devices(struct ipu6_isys *isys)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
		&isys->pdata->ipdata->csi2;
	unsigned int i, j;

	for (i = 0; i < csi2_pdata->nports; i++)
		for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++)
			ipu6_isys_video_cleanup(&isys->csi2[i].av[j]);
}

static int isys_register_video_devices(struct ipu6_isys *isys)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata =
		&isys->pdata->ipdata->csi2;
	unsigned int i, j;
	int ret;

	for (i = 0; i < csi2_pdata->nports; i++) {
		for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) {
			struct ipu6_isys_video *av = &isys->csi2[i].av[j];

			snprintf(av->vdev.name, sizeof(av->vdev.name),
				 IPU6_ISYS_ENTITY_PREFIX " ISYS Capture %u",
				 i * NR_OF_CSI2_SRC_PADS + j);
			av->isys = isys;
			av->aq.vbq.buf_struct_size =
				sizeof(struct ipu6_isys_video_buffer);

			ret = ipu6_isys_video_init(av);
			if (ret)
				goto fail;
		}
	}

	return 0;

fail:
	while (i--) {
		while (j--)
			ipu6_isys_video_cleanup(&isys->csi2[i].av[j]);
		j = NR_OF_CSI2_SRC_PADS;
	}

	return ret;
}

void isys_setup_hw(struct ipu6_isys *isys)
{
	void __iomem *base = isys->pdata->base;
	const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold;
	u32 irqs = 0;
	unsigned int i, nports;

	nports = isys->pdata->ipdata->csi2.nports;

	/* Enable irqs for all MIPI ports */
	for (i = 0; i < nports; i++)
		irqs |= IPU6_ISYS_UNISPART_IRQ_CSI2(i);

	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_edge);
	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_lnp);
	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_mask);
	writel(irqs, base + isys->pdata->ipdata->csi2.ctrl0_irq_enable);
	writel(GENMASK(19, 0),
	       base + isys->pdata->ipdata->csi2.ctrl0_irq_clear);

	irqs = ISYS_UNISPART_IRQS;
	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_EDGE);
	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE);
	writel(GENMASK(28, 0), base + IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);
	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);
	writel(irqs, base + IPU6_REG_ISYS_UNISPART_IRQ_ENABLE);

	writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);
	writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG);

	/* Write CDC FIFO threshold values for isys */
	for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++)
		writel(thd[i], base + IPU6_REG_ISYS_CDC_THRESHOLD(i));
}

static void ipu6_isys_csi2_isr(struct ipu6_isys_csi2 *csi2)
{
	struct ipu6_isys_stream *stream;
	unsigned int i;
	u32 status;
	int source;

	ipu6_isys_register_errors(csi2);

	status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
		       CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);

	writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
	       CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);

	source = csi2->asd.source;
	for (i = 0; i < NR_OF_CSI2_VC; i++) {
		if (status & IPU_CSI_RX_IRQ_FS_VC(i)) {
			stream = ipu6_isys_query_stream_by_source(csi2->isys,
								  source, i);
			if (stream) {
				ipu6_isys_csi2_sof_event_by_stream(stream);
				ipu6_isys_put_stream(stream);
			}
		}

		if (status & IPU_CSI_RX_IRQ_FE_VC(i)) {
			stream = ipu6_isys_query_stream_by_source(csi2->isys,
								  source, i);
			if (stream) {
				ipu6_isys_csi2_eof_event_by_stream(stream);
				ipu6_isys_put_stream(stream);
			}
		}
	}
}

irqreturn_t isys_isr(struct ipu6_bus_device *adev)
{
	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
	void __iomem *base = isys->pdata->base;
	u32 status_sw, status_csi;
	u32 ctrl0_status, ctrl0_clear;

	spin_lock(&isys->power_lock);
	if (!isys->power) {
		spin_unlock(&isys->power_lock);
		return IRQ_NONE;
	}

	ctrl0_status = isys->pdata->ipdata->csi2.ctrl0_irq_status;
	ctrl0_clear = isys->pdata->ipdata->csi2.ctrl0_irq_clear;

	status_csi = readl(isys->pdata->base + ctrl0_status);
	status_sw = readl(isys->pdata->base +
			  IPU6_REG_ISYS_UNISPART_IRQ_STATUS);

	writel(ISYS_UNISPART_IRQS & ~IPU6_ISYS_UNISPART_IRQ_SW,
	       base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);

	do {
		writel(status_csi, isys->pdata->base + ctrl0_clear);

		writel(status_sw, isys->pdata->base +
		       IPU6_REG_ISYS_UNISPART_IRQ_CLEAR);

		if (isys->isr_csi2_bits & status_csi) {
			unsigned int i;

			for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
				/* skip ports that are not enabled */
				if (!isys->csi2[i].base)
					continue;
				if (status_csi & IPU6_ISYS_UNISPART_IRQ_CSI2(i))
					ipu6_isys_csi2_isr(&isys->csi2[i]);
			}
		}

		writel(0, base + IPU6_REG_ISYS_UNISPART_SW_IRQ_REG);

		if (!isys_isr_one(adev))
			status_sw = IPU6_ISYS_UNISPART_IRQ_SW;
		else
			status_sw = 0;

		status_csi = readl(isys->pdata->base + ctrl0_status);
		status_sw |= readl(isys->pdata->base +
				   IPU6_REG_ISYS_UNISPART_IRQ_STATUS);
	} while ((status_csi & isys->isr_csi2_bits) ||
		 (status_sw & IPU6_ISYS_UNISPART_IRQ_SW));

	writel(ISYS_UNISPART_IRQS, base + IPU6_REG_ISYS_UNISPART_IRQ_MASK);

	spin_unlock(&isys->power_lock);

	return IRQ_HANDLED;
}

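/* Use the tuned LTR/DID lookup table when one has been set, else defaults */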
static void get_lut_ltrdid(struct ipu6_isys *isys, struct ltr_did *pltr_did)
{
	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
	struct ltr_did ltrdid_default;

	ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE;
	ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE;

	if (iwake_watermark->ltrdid.lut_ltr.value)
		*pltr_did = iwake_watermark->ltrdid;
	else
		*pltr_did = ltrdid_default;
}

static int set_iwake_register(struct ipu6_isys *isys, u32 index, u32 value)
{
	struct device *dev = &isys->adev->auxdev.dev;
	u32 req_id = index;
	u32 offset = 0;
	int ret;

	ret = ipu6_fw_isys_send_proxy_token(isys, req_id, index, offset, value);
	if (ret)
		dev_err(dev, "write %d failed %d", index, ret);

	return ret;
}

/*
 * When input system is powered up and before enabling any new sensor capture,
 * or after disabling any sensor capture the following values need to be set:
 * LTR_value = LTR(usec) from calculation;
 * LTR_scale = 2;
 * DID_value = DID(usec) from calculation;
 * DID_scale = 2;
 *
 * When input system is powered down, the LTR and DID values
 * must be returned to the default values:
 * LTR_value = 1023;
 * LTR_scale = 5;
 * DID_value = 1023;
 * DID_scale = 2;
 */
static void set_iwake_ltrdid(struct ipu6_isys *isys, u16 ltr, u16 did,
			     enum ltr_did_type use)
{
	struct device *dev = &isys->adev->auxdev.dev;
	u16 ltr_val, ltr_scale = LTR_SCALE_1024NS;
	u16 did_val, did_scale = DID_SCALE_1US;
	struct ipu6_device *isp = isys->adev->isp;
	union fabric_ctrl fc;

	switch (use) {
	case LTR_IWAKE_ON:
		ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX);
		did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX);
		ltr_scale = (ltr == LTR_DID_VAL_MAX &&
			     did == LTR_DID_VAL_MAX) ?
			LTR_SCALE_DEFAULT : LTR_SCALE_1024NS;
		break;
	case LTR_ISYS_ON:
	case LTR_IWAKE_OFF:
		ltr_val = LTR_DID_PKGC_2R;
		did_val = LTR_DID_PKGC_2R;
		break;
	case LTR_ISYS_OFF:
		ltr_val = LTR_DID_VAL_MAX;
		did_val = LTR_DID_VAL_MAX;
		ltr_scale = LTR_SCALE_DEFAULT;
		break;
	case LTR_ENHANNCE_IWAKE:
		if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) {
			ltr_val = LTR_DID_VAL_MAX;
			did_val = LTR_DID_VAL_MAX;
			ltr_scale = LTR_SCALE_DEFAULT;
		} else if (did < ONE_THOUSAND_MICROSECOND) {
			ltr_val = ltr;
			did_val = did;
		} else {
			ltr_val = ltr;
			/* div 90% value by 32 to account for scale change */
			did_val = did / 32;
			did_scale = DID_SCALE_32US;
		}
		break;
	default:
		ltr_val = LTR_DID_VAL_MAX;
		did_val = LTR_DID_VAL_MAX;
		ltr_scale = LTR_SCALE_DEFAULT;
		break;
	}

	fc.value = readl(isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
	fc.bits.ltr_val = ltr_val;
	fc.bits.ltr_scale = ltr_scale;
	fc.bits.did_val = did_val;
	fc.bits.did_scale = did_scale;

	dev_dbg(dev, "ltr: value %u scale %u, did: value %u scale %u\n",
		ltr_val, ltr_scale, did_val, did_scale);
	writel(fc.value, isp->base + IPU6_BUTTRESS_FABIC_CONTROL);
}

/*
 * For debug purposes the driver may clear the GDA_ENABLE_IWAKE register
 * before the FW configures the stream. Otherwise the driver should not
 * access this register.
 */
static void enable_iwake(struct ipu6_isys *isys, bool enable)
{
	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
	int ret;

	mutex_lock(&iwake_watermark->mutex);

	if (iwake_watermark->iwake_enabled == enable) {
		mutex_unlock(&iwake_watermark->mutex);
		return;
	}

	ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable);
	if (!ret)
		iwake_watermark->iwake_enabled = enable;

	mutex_unlock(&iwake_watermark->mutex);
}

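/*
 * Recompute the iwake LTR/DID values and GDA thresholds from the summed data
 * rate of the listed video streams, or fall back to the iwake-disabled
 * defaults when no stream is active or iwake is force-disabled.
 */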
void update_watermark_setting(struct ipu6_isys *isys)
{
	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;
	u32 iwake_threshold, iwake_critical_threshold, page_num;
	struct device *dev = &isys->adev->auxdev.dev;
	u32 calc_fill_time_us = 0, ltr = 0, did = 0;
	struct video_stream_watermark *p_watermark;
	enum ltr_did_type ltr_did_type;
	struct list_head *stream_node;
	u64 isys_pb_datarate_mbs = 0;
	u32 mem_open_threshold = 0;
	struct ltr_did ltrdid;
	u64 threshold_bytes;
	u32 max_sram_size;
	u32 shift;

	shift = isys->pdata->ipdata->sram_gran_shift;
	max_sram_size = isys->pdata->ipdata->max_sram_size;

	mutex_lock(&iwake_watermark->mutex);
	if (iwake_watermark->force_iwake_disable) {
		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
		goto unlock_exit;
	}

	if (list_empty(&iwake_watermark->video_list)) {
		isys_pb_datarate_mbs = 0;
	} else {
		list_for_each(stream_node, &iwake_watermark->video_list) {
			p_watermark = list_entry(stream_node,
						 struct video_stream_watermark,
						 stream_node);
			isys_pb_datarate_mbs += p_watermark->stream_data_rate;
		}
	}
	mutex_unlock(&iwake_watermark->mutex);

	if (!isys_pb_datarate_mbs) {
		enable_iwake(isys, false);
		set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
		mutex_lock(&iwake_watermark->mutex);
		set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
				   CRITICAL_THRESHOLD_IWAKE_DISABLE);
		goto unlock_exit;
	}

	enable_iwake(isys, true);
	calc_fill_time_us = max_sram_size / isys_pb_datarate_mbs;

	if (isys->pdata->ipdata->enhanced_iwake) {
		ltr = isys->pdata->ipdata->ltr;
		did = calc_fill_time_us * DEFAULT_DID_RATIO / 100;
		ltr_did_type = LTR_ENHANNCE_IWAKE;
	} else {
		get_lut_ltrdid(isys, &ltrdid);

		if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0)
			ltr = 0;
		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1)
			ltr = ltrdid.lut_ltr.bits.val0;
		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2)
			ltr = ltrdid.lut_ltr.bits.val1;
		else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3)
			ltr = ltrdid.lut_ltr.bits.val2;
		else
			ltr = ltrdid.lut_ltr.bits.val3;

		did = calc_fill_time_us - ltr;
		ltr_did_type = LTR_IWAKE_ON;
	}

	set_iwake_ltrdid(isys, ltr, did, ltr_did_type);

	/* calculate iwake threshold with 2KB granularity pages */
	threshold_bytes = did * isys_pb_datarate_mbs;
	iwake_threshold = max_t(u32, 1, threshold_bytes >> shift);
	iwake_threshold = min_t(u32, iwake_threshold, max_sram_size);

	mutex_lock(&iwake_watermark->mutex);
	if (isys->pdata->ipdata->enhanced_iwake) {
		set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
				   DEFAULT_IWAKE_THRESHOLD);
		/* calculate number of pages that will be filled in 10 usec */
		page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) /
			ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE;
		page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) %
			     ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 1 : 0;
		mem_open_threshold = isys->pdata->ipdata->memopen_threshold;
		mem_open_threshold = max_t(u32, mem_open_threshold, page_num);
		dev_dbg(dev, "mem_open_threshold: %u\n", mem_open_threshold);
		set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX,
				   mem_open_threshold);
	} else {
		set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX,
				   iwake_threshold);
	}

	iwake_critical_threshold = iwake_threshold +
		(IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2;

	dev_dbg(dev, "threshold: %u critical: %u\n", iwake_threshold,
		iwake_critical_threshold);

	set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
			   iwake_critical_threshold);

	writel(VAL_PKGC_PMON_CFG_RESET,
	       isys->adev->isp->base + REG_PKGC_PMON_CFG);
	writel(VAL_PKGC_PMON_CFG_START,
	       isys->adev->isp->base + REG_PKGC_PMON_CFG);
unlock_exit:
	mutex_unlock(&iwake_watermark->mutex);
}

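/* Prepare the iwake watermark bookkeeping before any stream is added */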
static void isys_iwake_watermark_init(struct ipu6_isys *isys)
{
	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;

	INIT_LIST_HEAD(&iwake_watermark->video_list);
	mutex_init(&iwake_watermark->mutex);

	iwake_watermark->ltrdid.lut_ltr.value = 0;
	iwake_watermark->isys = isys;
	iwake_watermark->iwake_enabled = false;
	iwake_watermark->force_iwake_disable = false;
}

static void isys_iwake_watermark_cleanup(struct ipu6_isys *isys)
{
	struct isys_iwake_watermark *iwake_watermark = &isys->iwake_watermark;

	mutex_lock(&iwake_watermark->mutex);
	list_del(&iwake_watermark->video_list);
	mutex_unlock(&iwake_watermark->mutex);

	mutex_destroy(&iwake_watermark->mutex);
}

/* The .bound() notifier callback when a match is found */
static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asc)
{
	struct ipu6_isys *isys =
		container_of(notifier, struct ipu6_isys, notifier);
	struct sensor_async_sd *s_asd =
		container_of(asc, struct sensor_async_sd, asc);
	int ret;

	ret = ipu_bridge_instantiate_vcm(sd->dev);
	if (ret) {
		dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n");
		return ret;
	}

	dev_dbg(&isys->adev->auxdev.dev, "bind %s nlanes is %d port is %d\n",
		sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
	ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
	if (ret)
		return ret;

	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
}

static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct ipu6_isys *isys =
		container_of(notifier, struct ipu6_isys, notifier);

	return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
}

static const struct v4l2_async_notifier_operations isys_async_ops = {
	.bound = isys_notifier_bound,
	.complete = isys_notifier_complete,
};

#define ISYS_MAX_PORTS 8
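/*
 * Parse the CSI-2 endpoints from the firmware node graph and register an
 * async notifier so the sensor subdevices can bind to the ISYS.
 */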
static int isys_notifier_init(struct ipu6_isys *isys)
{
	struct ipu6_device *isp = isys->adev->isp;
	struct device *dev = &isp->pdev->dev;
	unsigned int i;
	int ret;

	v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev);

	for (i = 0; i < ISYS_MAX_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		struct sensor_async_sd *s_asd;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
						     FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret) {
			dev_err(dev, "fwnode endpoint parse failed: %d\n", ret);
			goto err_parse;
		}

		s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep,
							struct sensor_async_sd);
		if (IS_ERR(s_asd)) {
			ret = PTR_ERR(s_asd);
			dev_err(dev, "add remote fwnode failed: %d\n", ret);
			goto err_parse;
		}

		s_asd->csi2.port = vep.base.port;
		s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes;

		dev_dbg(dev, "remote endpoint port %d with %d lanes added\n",
			s_asd->csi2.port, s_asd->csi2.nlanes);

		fwnode_handle_put(ep);

		continue;

err_parse:
		fwnode_handle_put(ep);
		return ret;
	}

	isys->notifier.ops = &isys_async_ops;
	ret = v4l2_async_nf_register(&isys->notifier);
	if (ret) {
		dev_err(dev, "failed to register async notifier: %d\n", ret);
		v4l2_async_nf_cleanup(&isys->notifier);
	}

	return ret;
}

static void isys_notifier_cleanup(struct ipu6_isys *isys)
{
	v4l2_async_nf_unregister(&isys->notifier);
	v4l2_async_nf_cleanup(&isys->notifier);
}

static int isys_register_devices(struct ipu6_isys *isys)
{
	struct device *dev = &isys->adev->auxdev.dev;
	struct pci_dev *pdev = isys->adev->isp->pdev;
	int ret;

	isys->media_dev.dev = dev;
	media_device_pci_init(&isys->media_dev,
			      pdev, IPU6_MEDIA_DEV_MODEL_NAME);

	strscpy(isys->v4l2_dev.name, isys->media_dev.model,
		sizeof(isys->v4l2_dev.name));

	ret = media_device_register(&isys->media_dev);
	if (ret < 0)
		goto out_media_device_unregister;

	isys->v4l2_dev.mdev = &isys->media_dev;
	isys->v4l2_dev.ctrl_handler = NULL;

	ret = v4l2_device_register(&pdev->dev, &isys->v4l2_dev);
	if (ret < 0)
		goto out_media_device_unregister;

	ret = isys_register_video_devices(isys);
	if (ret)
		goto out_v4l2_device_unregister;

	ret = isys_csi2_register_subdevices(isys);
	if (ret)
		goto out_isys_unregister_video_device;

	ret = isys_csi2_create_media_links(isys);
	if (ret)
		goto out_isys_unregister_subdevices;

	ret = isys_notifier_init(isys);
	if (ret)
		goto out_isys_unregister_subdevices;

	return 0;

out_isys_unregister_subdevices:
	isys_csi2_unregister_subdevices(isys);

out_isys_unregister_video_device:
	isys_unregister_video_devices(isys);

out_v4l2_device_unregister:
	v4l2_device_unregister(&isys->v4l2_dev);

out_media_device_unregister:
	media_device_unregister(&isys->media_dev);
	media_device_cleanup(&isys->media_dev);

	dev_err(dev, "failed to register isys devices\n");

	return ret;
}

static void isys_unregister_devices(struct ipu6_isys *isys)
{
	isys_unregister_video_devices(isys);
	isys_csi2_unregister_subdevices(isys);
	v4l2_device_unregister(&isys->v4l2_dev);
	media_device_unregister(&isys->media_dev);
	media_device_cleanup(&isys->media_dev);
}

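/*
 * Runtime resume: bring up the MMU, tighten the PM QoS latency request,
 * sync the buttress TSC, reprogram the ISYS hardware and restore the
 * LTR/DID values for an active input system.
 */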
static int isys_runtime_pm_resume(struct device *dev)
{
	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
	struct ipu6_device *isp = adev->isp;
	unsigned long flags;
	int ret;

	if (!isys)
		return 0;

	ret = ipu6_mmu_hw_init(adev->mmu);
	if (ret)
		return ret;

	cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);

	ret = ipu6_buttress_start_tsc_sync(isp);
	if (ret)
		return ret;

	spin_lock_irqsave(&isys->power_lock, flags);
	isys->power = 1;
	spin_unlock_irqrestore(&isys->power_lock, flags);

	isys_setup_hw(isys);

	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON);

	return 0;
}

static int isys_runtime_pm_suspend(struct device *dev)
{
	struct ipu6_bus_device *adev = to_ipu6_bus_device(dev);
	struct ipu6_isys *isys;
	unsigned long flags;

	isys = dev_get_drvdata(dev);
	if (!isys)
		return 0;

	spin_lock_irqsave(&isys->power_lock, flags);
	isys->power = 0;
	spin_unlock_irqrestore(&isys->power_lock, flags);

	mutex_lock(&isys->mutex);
	isys->need_reset = false;
	mutex_unlock(&isys->mutex);

	isys->phy_termcal_val = 0;
	cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);

	set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF);

	ipu6_mmu_hw_cleanup(adev->mmu);

	return 0;
}

static int isys_suspend(struct device *dev)
{
	struct ipu6_isys *isys = dev_get_drvdata(dev);

	/* If stream is open, refuse to suspend */
	if (isys->stream_opened)
		return -EBUSY;

	return 0;
}

static int isys_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops isys_pm_ops = {
	.runtime_suspend = isys_runtime_pm_suspend,
	.runtime_resume = isys_runtime_pm_resume,
	.suspend = isys_suspend,
	.resume = isys_resume,
};

static void isys_remove(struct auxiliary_device *auxdev)
{
	struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
	struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev);
	struct ipu6_device *isp = adev->isp;
	struct isys_fw_msgs *fwmsg, *safe;
	unsigned int i;

	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head)
		dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
			       fwmsg, fwmsg->dma_addr, 0);

	list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head)
		dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs),
			       fwmsg, fwmsg->dma_addr, 0);

	isys_unregister_devices(isys);
	isys_notifier_cleanup(isys);

	cpu_latency_qos_remove_request(&isys->pm_qos);

	if (!isp->secure_mode) {
		ipu6_cpd_free_pkg_dir(adev);
		ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
		release_firmware(adev->fw);
	}

	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
		mutex_destroy(&isys->streams[i].mutex);

	isys_iwake_watermark_cleanup(isys);
	mutex_destroy(&isys->stream_mutex);
	mutex_destroy(&isys->mutex);
}

static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount)
{
	struct device *dev = &isys->adev->auxdev.dev;
	struct isys_fw_msgs *addr;
	dma_addr_t dma_addr;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < amount; i++) {
		addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs),
				       &dma_addr, GFP_KERNEL, 0);
		if (!addr)
			break;
		addr->dma_addr = dma_addr;

		spin_lock_irqsave(&isys->listlock, flags);
		list_add(&addr->head, &isys->framebuflist);
		spin_unlock_irqrestore(&isys->listlock, flags);
	}

	if (i == amount)
		return 0;

	spin_lock_irqsave(&isys->listlock, flags);
	while (!list_empty(&isys->framebuflist)) {
		addr = list_first_entry(&isys->framebuflist,
					struct isys_fw_msgs, head);
		list_del(&addr->head);
		spin_unlock_irqrestore(&isys->listlock, flags);
		dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr,
			       addr->dma_addr, 0);
		spin_lock_irqsave(&isys->listlock, flags);
	}
	spin_unlock_irqrestore(&isys->listlock, flags);

	return -ENOMEM;
}

struct isys_fw_msgs *ipu6_get_fw_msg_buf(struct ipu6_isys_stream *stream)
{
	struct ipu6_isys *isys = stream->isys;
	struct device *dev = &isys->adev->auxdev.dev;
	struct isys_fw_msgs *msg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&isys->listlock, flags);
	if (list_empty(&isys->framebuflist)) {
		spin_unlock_irqrestore(&isys->listlock, flags);
		dev_dbg(dev, "Frame list empty\n");

		ret = alloc_fw_msg_bufs(isys, 5);
		if (ret < 0)
			return NULL;

		spin_lock_irqsave(&isys->listlock, flags);
		if (list_empty(&isys->framebuflist)) {
			spin_unlock_irqrestore(&isys->listlock, flags);
			dev_err(dev, "Frame list empty\n");
			return NULL;
		}
	}
	msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
	list_move(&msg->head, &isys->framebuflist_fw);
	spin_unlock_irqrestore(&isys->listlock, flags);
	memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));

	return msg;
}

void ipu6_cleanup_fw_msg_bufs(struct ipu6_isys *isys)
{
	struct isys_fw_msgs *fwmsg, *fwmsg0;
	unsigned long flags;

	spin_lock_irqsave(&isys->listlock, flags);
	list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
		list_move(&fwmsg->head, &isys->framebuflist);
	spin_unlock_irqrestore(&isys->listlock, flags);
}

void ipu6_put_fw_msg_buf(struct ipu6_isys *isys, u64 data)
{
	struct isys_fw_msgs *msg;
	unsigned long flags;
	u64 *ptr = (u64 *)data;

	if (!ptr)
		return;

	spin_lock_irqsave(&isys->listlock, flags);
	msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
	list_move(&msg->head, &isys->framebuflist);
	spin_unlock_irqrestore(&isys->listlock, flags);
}

static int isys_probe(struct auxiliary_device *auxdev,
		      const struct auxiliary_device_id *auxdev_id)
{
	const struct ipu6_isys_internal_csi2_pdata *csi2_pdata;
	struct ipu6_bus_device *adev = auxdev_to_adev(auxdev);
	struct ipu6_device *isp = adev->isp;
	const struct firmware *fw;
	struct ipu6_isys *isys;
	unsigned int i;
	int ret;

	if (!isp->bus_ready_to_probe)
		return -EPROBE_DEFER;

	isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL);
	if (!isys)
		return -ENOMEM;

	adev->auxdrv_data =
		(const struct ipu6_auxdrv_data *)auxdev_id->driver_data;
	adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver);
	isys->adev = adev;
	isys->pdata = adev->pdata;
	csi2_pdata = &isys->pdata->ipdata->csi2;

	isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports,
				  sizeof(*isys->csi2), GFP_KERNEL);
	if (!isys->csi2)
		return -ENOMEM;

	ret = ipu6_mmu_hw_init(adev->mmu);
	if (ret)
		return ret;

	/* initial sensor type */
	isys->sensor_type = isys->pdata->ipdata->sensor_type_start;

	spin_lock_init(&isys->streams_lock);
	spin_lock_init(&isys->power_lock);
	isys->power = 0;
	isys->phy_termcal_val = 0;

	mutex_init(&isys->mutex);
	mutex_init(&isys->stream_mutex);

	spin_lock_init(&isys->listlock);
	INIT_LIST_HEAD(&isys->framebuflist);
	INIT_LIST_HEAD(&isys->framebuflist_fw);

	isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN;
	isys->icache_prefetch = 0;

	dev_set_drvdata(&auxdev->dev, isys);

	isys_stream_init(isys);

	if (!isp->secure_mode) {
		fw = isp->cpd_fw;
		ret = ipu6_buttress_map_fw_image(adev, fw, &adev->fw_sgt);
		if (ret)
			goto release_firmware;

		ret = ipu6_cpd_create_pkg_dir(adev, isp->cpd_fw->data);
		if (ret)
			goto remove_shared_buffer;
	}

	cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);

	ret = alloc_fw_msg_bufs(isys, 20);
	if (ret < 0)
		goto out_remove_pkg_dir_shared_buffer;

	isys_iwake_watermark_init(isys);

	if (is_ipu6se(adev->isp->hw_ver))
		isys->phy_set_power = ipu6_isys_jsl_phy_set_power;
	else if (is_ipu6ep_mtl(adev->isp->hw_ver))
		isys->phy_set_power = ipu6_isys_dwc_phy_set_power;
	else
		isys->phy_set_power = ipu6_isys_mcd_phy_set_power;

	ret = isys_register_devices(isys);
	if (ret)
		goto out_remove_pkg_dir_shared_buffer;

	ipu6_mmu_hw_cleanup(adev->mmu);

	return 0;

out_remove_pkg_dir_shared_buffer:
	if (!isp->secure_mode)
		ipu6_cpd_free_pkg_dir(adev);
remove_shared_buffer:
	if (!isp->secure_mode)
		ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt);
release_firmware:
	if (!isp->secure_mode)
		release_firmware(adev->fw);

	for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++)
		mutex_destroy(&isys->streams[i].mutex);

	mutex_destroy(&isys->mutex);
	mutex_destroy(&isys->stream_mutex);

	ipu6_mmu_hw_cleanup(adev->mmu);

	return ret;
}

struct fwmsg {
	int type;
	char *msg;
	bool valid_ts;
};

static const struct fwmsg fw_msg[] = {
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
	 "STREAM_START_AND_CAPTURE_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
	 "STREAM_START_AND_CAPTURE_DONE", 1},
	{IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1},
	{IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1},
	{IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1},
	{IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1},
	{-1, "UNKNOWN MESSAGE", 0}
};

static u32 resp_type_to_index(int type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fw_msg); i++)
		if (fw_msg[i].type == type)
			return i;

	return ARRAY_SIZE(fw_msg) - 1;
}

static int isys_isr_one(struct ipu6_bus_device *adev)
{
	struct ipu6_isys *isys = ipu6_bus_get_drvdata(adev);
	struct ipu6_fw_isys_resp_info_abi *resp;
	struct ipu6_isys_stream *stream;
	struct ipu6_isys_csi2 *csi2 = NULL;
	u32 index;
	u64 ts;

	if (!isys->fwcom)
		return 1;

	resp = ipu6_fw_isys_get_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
	if (!resp)
		return 1;

	ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0];

	index = resp_type_to_index(resp->type);
	dev_dbg(&adev->auxdev.dev,
		"FW resp %02d %s, stream %u, ts 0x%16.16llx, pin %d\n",
		resp->type, fw_msg[index].msg, resp->stream_handle,
		fw_msg[index].valid_ts ? ts : 0, resp->pin_id);

	if (resp->error_info.error == IPU6_FW_ISYS_ERROR_STREAM_IN_SUSPENSION)
		/* Suspension is a kind of special case: not enough buffers */
		dev_dbg(&adev->auxdev.dev,
			"FW error resp SUSPENSION, details %d\n",
			resp->error_info.error_details);
	else if (resp->error_info.error)
		dev_dbg(&adev->auxdev.dev,
			"FW error resp error %d, details %d\n",
			resp->error_info.error, resp->error_info.error_details);

	if (resp->stream_handle >= IPU6_ISYS_MAX_STREAMS) {
		dev_err(&adev->auxdev.dev, "bad stream handle %u\n",
			resp->stream_handle);
		goto leave;
	}

	stream = ipu6_isys_query_stream_by_handle(isys, resp->stream_handle);
	if (!stream) {
		dev_err(&adev->auxdev.dev, "stream of stream_handle %u is unused\n",
			resp->stream_handle);
		goto leave;
	}
	stream->error = resp->error_info.error;

	csi2 = ipu6_isys_subdev_to_csi2(stream->asd);

	switch (resp->type) {
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
		complete(&stream->stream_open_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
		complete(&stream->stream_close_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_ACK:
		complete(&stream->stream_start_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
		complete(&stream->stream_start_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK:
		complete(&stream->stream_stop_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
		complete(&stream->stream_stop_completion);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_PIN_DATA_READY:
		/*
		 * The firmware only releases the capture msg once the
		 * software has received the pin_data_ready event.
		 */
		ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id);
		if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS &&
		    stream->output_pins[resp->pin_id].pin_ready)
			stream->output_pins[resp->pin_id].pin_ready(stream,
								    resp);
		else
			dev_warn(&adev->auxdev.dev,
				 "%d:No data pin ready handler for pin id %d\n",
				 resp->stream_handle, resp->pin_id);
		if (csi2)
			ipu6_isys_csi2_error(csi2);

		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
	case IPU6_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
		break;
	case IPU6_FW_ISYS_RESP_TYPE_FRAME_SOF:

		ipu6_isys_csi2_sof_event_by_stream(stream);
		stream->seq[stream->seq_index].sequence =
			atomic_read(&stream->sequence) - 1;
		stream->seq[stream->seq_index].timestamp = ts;
		dev_dbg(&adev->auxdev.dev,
			"sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
			resp->stream_handle,
			stream->seq[stream->seq_index].sequence, ts);
		stream->seq_index = (stream->seq_index + 1)
			% IPU6_ISYS_MAX_PARALLEL_SOF;
		break;
	case IPU6_FW_ISYS_RESP_TYPE_FRAME_EOF:
		ipu6_isys_csi2_eof_event_by_stream(stream);
		dev_dbg(&adev->auxdev.dev,
			"eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
			resp->stream_handle,
			stream->seq[stream->seq_index].sequence, ts);
		break;
	case IPU6_FW_ISYS_RESP_TYPE_STATS_DATA_READY:
		break;
	default:
		dev_err(&adev->auxdev.dev, "%d:unknown response type %u\n",
			resp->stream_handle, resp->type);
		break;
	}

	ipu6_isys_put_stream(stream);
leave:
	ipu6_fw_isys_put_resp(isys->fwcom, IPU6_BASE_MSG_RECV_QUEUES);
	return 0;
}

static const struct ipu6_auxdrv_data ipu6_isys_auxdrv_data = {
	.isr = isys_isr,
	.isr_threaded = NULL,
	.wake_isr_thread = false,
};

static const struct auxiliary_device_id ipu6_isys_id_table[] = {
	{
		.name = "intel_ipu6.isys",
		.driver_data = (kernel_ulong_t)&ipu6_isys_auxdrv_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, ipu6_isys_id_table);

static struct auxiliary_driver isys_driver = {
	.name = IPU6_ISYS_NAME,
	.probe = isys_probe,
	.remove = isys_remove,
	.id_table = ipu6_isys_id_table,
	.driver = {
		.pm = &isys_pm_ops,
	},
};

module_auxiliary_driver(isys_driver);

MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel IPU6 input system driver");
MODULE_IMPORT_NS(INTEL_IPU6);
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);