1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright © 2018-2020 Intel Corporation 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/module.h> 8 #include <linux/of_graph.h> 9 #include <linux/of_platform.h> 10 #include <linux/of_reserved_mem.h> 11 #include <linux/mfd/syscon.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_runtime.h> 14 #include <linux/regmap.h> 15 16 #include <drm/drm_atomic_helper.h> 17 #include <drm/drm_drv.h> 18 #include <drm/drm_fb_helper.h> 19 #include <drm/drm_gem_cma_helper.h> 20 #include <drm/drm_gem_framebuffer_helper.h> 21 #include <drm/drm_module.h> 22 #include <drm/drm_probe_helper.h> 23 #include <drm/drm_vblank.h> 24 25 #include "kmb_drv.h" 26 #include "kmb_dsi.h" 27 #include "kmb_regs.h" 28 29 static int kmb_display_clk_enable(struct kmb_drm_private *kmb) 30 { 31 int ret = 0; 32 33 ret = clk_prepare_enable(kmb->kmb_clk.clk_lcd); 34 if (ret) { 35 drm_err(&kmb->drm, "Failed to enable LCD clock: %d\n", ret); 36 return ret; 37 } 38 DRM_INFO("SUCCESS : enabled LCD clocks\n"); 39 return 0; 40 } 41 42 static int kmb_initialize_clocks(struct kmb_drm_private *kmb, struct device *dev) 43 { 44 int ret = 0; 45 struct regmap *msscam; 46 47 kmb->kmb_clk.clk_lcd = devm_clk_get(dev, "clk_lcd"); 48 if (IS_ERR(kmb->kmb_clk.clk_lcd)) { 49 drm_err(&kmb->drm, "clk_get() failed clk_lcd\n"); 50 return PTR_ERR(kmb->kmb_clk.clk_lcd); 51 } 52 53 kmb->kmb_clk.clk_pll0 = devm_clk_get(dev, "clk_pll0"); 54 if (IS_ERR(kmb->kmb_clk.clk_pll0)) { 55 drm_err(&kmb->drm, "clk_get() failed clk_pll0 "); 56 return PTR_ERR(kmb->kmb_clk.clk_pll0); 57 } 58 kmb->sys_clk_mhz = clk_get_rate(kmb->kmb_clk.clk_pll0) / 1000000; 59 drm_info(&kmb->drm, "system clk = %d Mhz", kmb->sys_clk_mhz); 60 61 ret = kmb_dsi_clk_init(kmb->kmb_dsi); 62 63 /* Set LCD clock to 200 Mhz */ 64 clk_set_rate(kmb->kmb_clk.clk_lcd, KMB_LCD_DEFAULT_CLK); 65 if (clk_get_rate(kmb->kmb_clk.clk_lcd) != KMB_LCD_DEFAULT_CLK) { 66 drm_err(&kmb->drm, "failed to set to clk_lcd to %d\n", 67 KMB_LCD_DEFAULT_CLK); 68 return -1; 69 } 70 drm_dbg(&kmb->drm, "clk_lcd = %ld\n", clk_get_rate(kmb->kmb_clk.clk_lcd)); 71 72 ret = kmb_display_clk_enable(kmb); 73 if (ret) 74 return ret; 75 76 msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam"); 77 if (IS_ERR(msscam)) { 78 drm_err(&kmb->drm, "failed to get msscam syscon"); 79 return -1; 80 } 81 82 /* Enable MSS_CAM_CLK_CTRL for MIPI TX and LCD */ 83 regmap_update_bits(msscam, MSS_CAM_CLK_CTRL, 0x1fff, 0x1fff); 84 regmap_update_bits(msscam, MSS_CAM_RSTN_CTRL, 0xffffffff, 0xffffffff); 85 return 0; 86 } 87 88 static void kmb_display_clk_disable(struct kmb_drm_private *kmb) 89 { 90 clk_disable_unprepare(kmb->kmb_clk.clk_lcd); 91 } 92 93 static void __iomem *kmb_map_mmio(struct drm_device *drm, 94 struct platform_device *pdev, 95 char *name) 96 { 97 struct resource *res; 98 void __iomem *mem; 99 100 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); 101 if (!res) { 102 drm_err(drm, "failed to get resource for %s", name); 103 return ERR_PTR(-ENOMEM); 104 } 105 mem = devm_ioremap_resource(drm->dev, res); 106 if (IS_ERR(mem)) 107 drm_err(drm, "failed to ioremap %s registers", name); 108 return mem; 109 } 110 111 static int kmb_hw_init(struct drm_device *drm, unsigned long flags) 112 { 113 struct kmb_drm_private *kmb = to_kmb(drm); 114 struct platform_device *pdev = to_platform_device(drm->dev); 115 int irq_lcd; 116 int ret = 0; 117 118 /* Map LCD MMIO registers */ 119 kmb->lcd_mmio = kmb_map_mmio(drm, pdev, "lcd"); 120 if (IS_ERR(kmb->lcd_mmio)) { 121 drm_err(&kmb->drm, "failed to map LCD registers\n"); 122 return -ENOMEM; 123 } 124 125 /* Map MIPI MMIO registers */ 126 ret = kmb_dsi_map_mmio(kmb->kmb_dsi); 127 if (ret) 128 return ret; 129 130 /* Enable display clocks */ 131 kmb_initialize_clocks(kmb, &pdev->dev); 132 133 /* Register irqs here - section 17.3 in databook 134 * lists LCD at 79 and 82 for MIPI under MSS CPU - 135 * firmware has redirected 79 to A53 IRQ 33 136 */ 137 138 /* Allocate LCD interrupt resources */ 139 irq_lcd = platform_get_irq(pdev, 0); 140 if (irq_lcd < 0) { 141 ret = irq_lcd; 142 drm_err(&kmb->drm, "irq_lcd not found"); 143 goto setup_fail; 144 } 145 146 /* Get the optional framebuffer memory resource */ 147 ret = of_reserved_mem_device_init(drm->dev); 148 if (ret && ret != -ENODEV) 149 return ret; 150 151 spin_lock_init(&kmb->irq_lock); 152 153 kmb->irq_lcd = irq_lcd; 154 155 return 0; 156 157 setup_fail: 158 of_reserved_mem_device_release(drm->dev); 159 160 return ret; 161 } 162 163 static const struct drm_mode_config_funcs kmb_mode_config_funcs = { 164 .fb_create = drm_gem_fb_create, 165 .atomic_check = drm_atomic_helper_check, 166 .atomic_commit = drm_atomic_helper_commit, 167 }; 168 169 static int kmb_setup_mode_config(struct drm_device *drm) 170 { 171 int ret; 172 struct kmb_drm_private *kmb = to_kmb(drm); 173 174 ret = drmm_mode_config_init(drm); 175 if (ret) 176 return ret; 177 drm->mode_config.min_width = KMB_FB_MIN_WIDTH; 178 drm->mode_config.min_height = KMB_FB_MIN_HEIGHT; 179 drm->mode_config.max_width = KMB_FB_MAX_WIDTH; 180 drm->mode_config.max_height = KMB_FB_MAX_HEIGHT; 181 drm->mode_config.preferred_depth = 24; 182 drm->mode_config.funcs = &kmb_mode_config_funcs; 183 184 ret = kmb_setup_crtc(drm); 185 if (ret < 0) { 186 drm_err(drm, "failed to create crtc\n"); 187 return ret; 188 } 189 ret = kmb_dsi_encoder_init(drm, kmb->kmb_dsi); 190 /* Set the CRTC's port so that the encoder component can find it */ 191 kmb->crtc.port = of_graph_get_port_by_id(drm->dev->of_node, 0); 192 ret = drm_vblank_init(drm, drm->mode_config.num_crtc); 193 if (ret < 0) { 194 drm_err(drm, "failed to initialize vblank\n"); 195 pm_runtime_disable(drm->dev); 196 return ret; 197 } 198 199 drm_mode_config_reset(drm); 200 return 0; 201 } 202 203 static irqreturn_t handle_lcd_irq(struct drm_device *dev) 204 { 205 unsigned long status, val, val1; 206 int plane_id, dma0_state, dma1_state; 207 struct kmb_drm_private *kmb = to_kmb(dev); 208 u32 ctrl = 0; 209 210 status = kmb_read_lcd(kmb, LCD_INT_STATUS); 211 212 spin_lock(&kmb->irq_lock); 213 if (status & LCD_INT_EOF) { 214 kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF); 215 216 /* When disabling/enabling LCD layers, the change takes effect 217 * immediately and does not wait for EOF (end of frame). 218 * When kmb_plane_atomic_disable is called, mark the plane as 219 * disabled but actually disable the plane when EOF irq is 220 * being handled. 221 */ 222 for (plane_id = LAYER_0; 223 plane_id < KMB_MAX_PLANES; plane_id++) { 224 if (kmb->plane_status[plane_id].disable) { 225 kmb_clr_bitmask_lcd(kmb, 226 LCD_LAYERn_DMA_CFG 227 (plane_id), 228 LCD_DMA_LAYER_ENABLE); 229 230 kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, 231 kmb->plane_status[plane_id].ctrl); 232 233 ctrl = kmb_read_lcd(kmb, LCD_CONTROL); 234 if (!(ctrl & (LCD_CTRL_VL1_ENABLE | 235 LCD_CTRL_VL2_ENABLE | 236 LCD_CTRL_GL1_ENABLE | 237 LCD_CTRL_GL2_ENABLE))) { 238 /* If no LCD layers are using DMA, 239 * then disable DMA pipelined AXI read 240 * transactions. 241 */ 242 kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, 243 LCD_CTRL_PIPELINE_DMA); 244 } 245 246 kmb->plane_status[plane_id].disable = false; 247 } 248 } 249 if (kmb->kmb_under_flow) { 250 /* DMA Recovery after underflow */ 251 dma0_state = (kmb->layer_no == 0) ? 252 LCD_VIDEO0_DMA0_STATE : LCD_VIDEO1_DMA0_STATE; 253 dma1_state = (kmb->layer_no == 0) ? 254 LCD_VIDEO0_DMA1_STATE : LCD_VIDEO1_DMA1_STATE; 255 256 do { 257 kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); 258 val = kmb_read_lcd(kmb, dma0_state) 259 & LCD_DMA_STATE_ACTIVE; 260 val1 = kmb_read_lcd(kmb, dma1_state) 261 & LCD_DMA_STATE_ACTIVE; 262 } while ((val || val1)); 263 /* disable dma */ 264 kmb_clr_bitmask_lcd(kmb, 265 LCD_LAYERn_DMA_CFG(kmb->layer_no), 266 LCD_DMA_LAYER_ENABLE); 267 kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); 268 kmb->kmb_flush_done = 1; 269 kmb->kmb_under_flow = 0; 270 } 271 } 272 273 if (status & LCD_INT_LINE_CMP) { 274 /* clear line compare interrupt */ 275 kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LINE_CMP); 276 } 277 278 if (status & LCD_INT_VERT_COMP) { 279 /* Read VSTATUS */ 280 val = kmb_read_lcd(kmb, LCD_VSTATUS); 281 val = (val & LCD_VSTATUS_VERTICAL_STATUS_MASK); 282 switch (val) { 283 case LCD_VSTATUS_COMPARE_VSYNC: 284 /* Clear vertical compare interrupt */ 285 kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); 286 if (kmb->kmb_flush_done) { 287 kmb_set_bitmask_lcd(kmb, 288 LCD_LAYERn_DMA_CFG 289 (kmb->layer_no), 290 LCD_DMA_LAYER_ENABLE); 291 kmb->kmb_flush_done = 0; 292 } 293 drm_crtc_handle_vblank(&kmb->crtc); 294 break; 295 case LCD_VSTATUS_COMPARE_BACKPORCH: 296 case LCD_VSTATUS_COMPARE_ACTIVE: 297 case LCD_VSTATUS_COMPARE_FRONT_PORCH: 298 kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); 299 break; 300 } 301 } 302 if (status & LCD_INT_DMA_ERR) { 303 val = 304 (status & LCD_INT_DMA_ERR & 305 kmb_read_lcd(kmb, LCD_INT_ENABLE)); 306 /* LAYER0 - VL0 */ 307 if (val & (LAYER0_DMA_FIFO_UNDERFLOW | 308 LAYER0_DMA_CB_FIFO_UNDERFLOW | 309 LAYER0_DMA_CR_FIFO_UNDERFLOW)) { 310 kmb->kmb_under_flow++; 311 drm_info(&kmb->drm, 312 "!LAYER0:VL0 DMA UNDERFLOW val = 0x%lx,under_flow=%d", 313 val, kmb->kmb_under_flow); 314 /* disable underflow interrupt */ 315 kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, 316 LAYER0_DMA_FIFO_UNDERFLOW | 317 LAYER0_DMA_CB_FIFO_UNDERFLOW | 318 LAYER0_DMA_CR_FIFO_UNDERFLOW); 319 kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 320 LAYER0_DMA_CB_FIFO_UNDERFLOW | 321 LAYER0_DMA_FIFO_UNDERFLOW | 322 LAYER0_DMA_CR_FIFO_UNDERFLOW); 323 /* disable auto restart mode */ 324 kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(0), 325 LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); 326 327 kmb->layer_no = 0; 328 } 329 330 if (val & LAYER0_DMA_FIFO_OVERFLOW) 331 drm_dbg(&kmb->drm, 332 "LAYER0:VL0 DMA OVERFLOW val = 0x%lx", val); 333 if (val & LAYER0_DMA_CB_FIFO_OVERFLOW) 334 drm_dbg(&kmb->drm, 335 "LAYER0:VL0 DMA CB OVERFLOW val = 0x%lx", val); 336 if (val & LAYER0_DMA_CR_FIFO_OVERFLOW) 337 drm_dbg(&kmb->drm, 338 "LAYER0:VL0 DMA CR OVERFLOW val = 0x%lx", val); 339 340 /* LAYER1 - VL1 */ 341 if (val & (LAYER1_DMA_FIFO_UNDERFLOW | 342 LAYER1_DMA_CB_FIFO_UNDERFLOW | 343 LAYER1_DMA_CR_FIFO_UNDERFLOW)) { 344 kmb->kmb_under_flow++; 345 drm_info(&kmb->drm, 346 "!LAYER1:VL1 DMA UNDERFLOW val = 0x%lx, under_flow=%d", 347 val, kmb->kmb_under_flow); 348 /* disable underflow interrupt */ 349 kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, 350 LAYER1_DMA_FIFO_UNDERFLOW | 351 LAYER1_DMA_CB_FIFO_UNDERFLOW | 352 LAYER1_DMA_CR_FIFO_UNDERFLOW); 353 kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 354 LAYER1_DMA_CB_FIFO_UNDERFLOW | 355 LAYER1_DMA_FIFO_UNDERFLOW | 356 LAYER1_DMA_CR_FIFO_UNDERFLOW); 357 /* disable auto restart mode */ 358 kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(1), 359 LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); 360 kmb->layer_no = 1; 361 } 362 363 /* LAYER1 - VL1 */ 364 if (val & LAYER1_DMA_FIFO_OVERFLOW) 365 drm_dbg(&kmb->drm, 366 "LAYER1:VL1 DMA OVERFLOW val = 0x%lx", val); 367 if (val & LAYER1_DMA_CB_FIFO_OVERFLOW) 368 drm_dbg(&kmb->drm, 369 "LAYER1:VL1 DMA CB OVERFLOW val = 0x%lx", val); 370 if (val & LAYER1_DMA_CR_FIFO_OVERFLOW) 371 drm_dbg(&kmb->drm, 372 "LAYER1:VL1 DMA CR OVERFLOW val = 0x%lx", val); 373 374 /* LAYER2 - GL0 */ 375 if (val & LAYER2_DMA_FIFO_UNDERFLOW) 376 drm_dbg(&kmb->drm, 377 "LAYER2:GL0 DMA UNDERFLOW val = 0x%lx", val); 378 if (val & LAYER2_DMA_FIFO_OVERFLOW) 379 drm_dbg(&kmb->drm, 380 "LAYER2:GL0 DMA OVERFLOW val = 0x%lx", val); 381 382 /* LAYER3 - GL1 */ 383 if (val & LAYER3_DMA_FIFO_UNDERFLOW) 384 drm_dbg(&kmb->drm, 385 "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val); 386 if (val & LAYER3_DMA_FIFO_OVERFLOW) 387 drm_dbg(&kmb->drm, 388 "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val); 389 } 390 391 spin_unlock(&kmb->irq_lock); 392 393 if (status & LCD_INT_LAYER) { 394 /* Clear layer interrupts */ 395 kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LAYER); 396 } 397 398 /* Clear all interrupts */ 399 kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 1); 400 return IRQ_HANDLED; 401 } 402 403 /* IRQ handler */ 404 static irqreturn_t kmb_isr(int irq, void *arg) 405 { 406 struct drm_device *dev = (struct drm_device *)arg; 407 408 handle_lcd_irq(dev); 409 return IRQ_HANDLED; 410 } 411 412 static void kmb_irq_reset(struct drm_device *drm) 413 { 414 kmb_write_lcd(to_kmb(drm), LCD_INT_CLEAR, 0xFFFF); 415 kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0); 416 } 417 418 static int kmb_irq_install(struct drm_device *drm, unsigned int irq) 419 { 420 if (irq == IRQ_NOTCONNECTED) 421 return -ENOTCONN; 422 423 kmb_irq_reset(drm); 424 425 return request_irq(irq, kmb_isr, 0, drm->driver->name, drm); 426 } 427 428 static void kmb_irq_uninstall(struct drm_device *drm) 429 { 430 struct kmb_drm_private *kmb = to_kmb(drm); 431 432 kmb_irq_reset(drm); 433 free_irq(kmb->irq_lcd, drm); 434 } 435 436 DEFINE_DRM_GEM_CMA_FOPS(fops); 437 438 static const struct drm_driver kmb_driver = { 439 .driver_features = DRIVER_GEM | 440 DRIVER_MODESET | DRIVER_ATOMIC, 441 /* GEM Operations */ 442 .fops = &fops, 443 DRM_GEM_CMA_DRIVER_OPS_VMAP, 444 .name = "kmb-drm", 445 .desc = "KEEMBAY DISPLAY DRIVER", 446 .date = DRIVER_DATE, 447 .major = DRIVER_MAJOR, 448 .minor = DRIVER_MINOR, 449 }; 450 451 static int kmb_remove(struct platform_device *pdev) 452 { 453 struct device *dev = &pdev->dev; 454 struct drm_device *drm = dev_get_drvdata(dev); 455 struct kmb_drm_private *kmb = to_kmb(drm); 456 457 drm_dev_unregister(drm); 458 drm_kms_helper_poll_fini(drm); 459 of_node_put(kmb->crtc.port); 460 kmb->crtc.port = NULL; 461 pm_runtime_get_sync(drm->dev); 462 kmb_irq_uninstall(drm); 463 pm_runtime_put_sync(drm->dev); 464 pm_runtime_disable(drm->dev); 465 466 of_reserved_mem_device_release(drm->dev); 467 468 /* Release clks */ 469 kmb_display_clk_disable(kmb); 470 471 dev_set_drvdata(dev, NULL); 472 473 /* Unregister DSI host */ 474 kmb_dsi_host_unregister(kmb->kmb_dsi); 475 drm_atomic_helper_shutdown(drm); 476 return 0; 477 } 478 479 static int kmb_probe(struct platform_device *pdev) 480 { 481 struct device *dev = get_device(&pdev->dev); 482 struct kmb_drm_private *kmb; 483 int ret = 0; 484 struct device_node *dsi_in; 485 struct device_node *dsi_node; 486 struct platform_device *dsi_pdev; 487 488 /* The bridge (ADV 7535) will return -EPROBE_DEFER until it 489 * has a mipi_dsi_host to register its device to. So, we 490 * first register the DSI host during probe time, and then return 491 * -EPROBE_DEFER until the bridge is loaded. Probe will be called again 492 * and then the rest of the driver initialization can proceed 493 * afterwards and the bridge can be successfully attached. 494 */ 495 dsi_in = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); 496 if (!dsi_in) { 497 DRM_ERROR("Failed to get dsi_in node info from DT"); 498 return -EINVAL; 499 } 500 dsi_node = of_graph_get_remote_port_parent(dsi_in); 501 if (!dsi_node) { 502 of_node_put(dsi_in); 503 DRM_ERROR("Failed to get dsi node from DT\n"); 504 return -EINVAL; 505 } 506 507 dsi_pdev = of_find_device_by_node(dsi_node); 508 if (!dsi_pdev) { 509 of_node_put(dsi_in); 510 of_node_put(dsi_node); 511 DRM_ERROR("Failed to get dsi platform device\n"); 512 return -EINVAL; 513 } 514 515 of_node_put(dsi_in); 516 of_node_put(dsi_node); 517 ret = kmb_dsi_host_bridge_init(get_device(&dsi_pdev->dev)); 518 519 if (ret == -EPROBE_DEFER) { 520 return -EPROBE_DEFER; 521 } else if (ret) { 522 DRM_ERROR("probe failed to initialize DSI host bridge\n"); 523 return ret; 524 } 525 526 /* Create DRM device */ 527 kmb = devm_drm_dev_alloc(dev, &kmb_driver, 528 struct kmb_drm_private, drm); 529 if (IS_ERR(kmb)) 530 return PTR_ERR(kmb); 531 532 dev_set_drvdata(dev, &kmb->drm); 533 534 /* Initialize MIPI DSI */ 535 kmb->kmb_dsi = kmb_dsi_init(dsi_pdev); 536 if (IS_ERR(kmb->kmb_dsi)) { 537 drm_err(&kmb->drm, "failed to initialize DSI\n"); 538 ret = PTR_ERR(kmb->kmb_dsi); 539 goto err_free1; 540 } 541 542 kmb->kmb_dsi->dev = &dsi_pdev->dev; 543 kmb->kmb_dsi->pdev = dsi_pdev; 544 ret = kmb_hw_init(&kmb->drm, 0); 545 if (ret) 546 goto err_free1; 547 548 ret = kmb_setup_mode_config(&kmb->drm); 549 if (ret) 550 goto err_free; 551 552 ret = kmb_irq_install(&kmb->drm, kmb->irq_lcd); 553 if (ret < 0) { 554 drm_err(&kmb->drm, "failed to install IRQ handler\n"); 555 goto err_irq; 556 } 557 558 drm_kms_helper_poll_init(&kmb->drm); 559 560 /* Register graphics device with the kernel */ 561 ret = drm_dev_register(&kmb->drm, 0); 562 if (ret) 563 goto err_register; 564 565 drm_fbdev_generic_setup(&kmb->drm, 0); 566 567 return 0; 568 569 err_register: 570 drm_kms_helper_poll_fini(&kmb->drm); 571 err_irq: 572 pm_runtime_disable(kmb->drm.dev); 573 err_free: 574 drm_crtc_cleanup(&kmb->crtc); 575 drm_mode_config_cleanup(&kmb->drm); 576 err_free1: 577 dev_set_drvdata(dev, NULL); 578 kmb_dsi_host_unregister(kmb->kmb_dsi); 579 580 return ret; 581 } 582 583 static const struct of_device_id kmb_of_match[] = { 584 {.compatible = "intel,keembay-display"}, 585 {}, 586 }; 587 588 MODULE_DEVICE_TABLE(of, kmb_of_match); 589 590 static int __maybe_unused kmb_pm_suspend(struct device *dev) 591 { 592 struct drm_device *drm = dev_get_drvdata(dev); 593 struct kmb_drm_private *kmb = to_kmb(drm); 594 595 drm_kms_helper_poll_disable(drm); 596 597 kmb->state = drm_atomic_helper_suspend(drm); 598 if (IS_ERR(kmb->state)) { 599 drm_kms_helper_poll_enable(drm); 600 return PTR_ERR(kmb->state); 601 } 602 603 return 0; 604 } 605 606 static int __maybe_unused kmb_pm_resume(struct device *dev) 607 { 608 struct drm_device *drm = dev_get_drvdata(dev); 609 struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL; 610 611 if (!kmb) 612 return 0; 613 614 drm_atomic_helper_resume(drm, kmb->state); 615 drm_kms_helper_poll_enable(drm); 616 617 return 0; 618 } 619 620 static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume); 621 622 static struct platform_driver kmb_platform_driver = { 623 .probe = kmb_probe, 624 .remove = kmb_remove, 625 .driver = { 626 .name = "kmb-drm", 627 .pm = &kmb_pm_ops, 628 .of_match_table = kmb_of_match, 629 }, 630 }; 631 632 drm_module_platform_driver(kmb_platform_driver); 633 634 MODULE_AUTHOR("Intel Corporation"); 635 MODULE_DESCRIPTION("Keembay Display driver"); 636 MODULE_LICENSE("GPL v2"); 637