// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "malidp_drv.h"
#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"

#define MALIDP_CONF_VALID_TIMEOUT	250
#define AFBC_HEADER_SIZE		16
#define AFBC_SUPERBLK_ALIGNMENT		128

static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
				     u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
{
	int i;
	/* Update all channels with a single gamma curve. */
	const u32 gamma_write_mask = GENMASK(18, 16);
	/*
	 * Always write an entire table, so the address field in
	 * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
	 * directly.
	 */
	malidp_hw_write(hwdev, gamma_write_mask,
			hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
		malidp_hw_write(hwdev, data[i],
				hwdev->hw->map.coeffs_base +
				MALIDP_COEF_TABLE_DATA);
}

static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
					      struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->gamma_lut) {
		malidp_hw_clearbits(hwdev,
				    MALIDP_DISP_FUNC_GAMMA,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
					      old_state->gamma_lut->base.id))
			malidp_write_gamma_table(hwdev, mc->gamma_coeffs);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

static
void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int i;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->ctm) {
		malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->ctm || (crtc->state->ctm->base.id !=
					old_state->ctm->base.id))
			for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
				malidp_hw_write(hwdev,
						mc->coloradj_coeffs[i],
						hwdev->hw->map.coeffs_base +
						MALIDP_COLOR_ADJ_COEF + 4 * i);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

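/*
 * Program the scaling engine (SE) for the new CRTC state: enable or disable
 * scaling, load the coefficients, set the input/output sizes and the initial
 * and delta phases for both directions.
 */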
static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
{
	struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
	struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_se_config *s = &cs->scaler_config;
	struct malidp_se_config *old_s = &old_cs->scaler_config;
	u32 se_control = hwdev->hw->map.se_base +
			 ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
			 0x10 : 0xC);
	u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
	u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
	u32 val;

	/* Set SE_CONTROL */
	if (!s->scale_enable) {
		val = malidp_hw_read(hwdev, se_control);
		val &= ~MALIDP_SE_SCALING_EN;
		malidp_hw_write(hwdev, val, se_control);
		return;
	}

	hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
	val = malidp_hw_read(hwdev, se_control);
	val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;

	val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
	val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;

	val |= MALIDP_SE_RGBO_IF_EN;
	malidp_hw_write(hwdev, val, se_control);

	/* Set IN_SIZE & OUT_SIZE. */
	val = MALIDP_SE_SET_V_SIZE(s->input_h) |
	      MALIDP_SE_SET_H_SIZE(s->input_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
	val = MALIDP_SE_SET_V_SIZE(s->output_h) |
	      MALIDP_SE_SET_H_SIZE(s->output_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);

	/* Set phase regs. */
	malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
	malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
	malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
	malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
}

/*
 * set the "config valid" bit and wait until the hardware acts on it
 */
static int malidp_set_and_wait_config_valid(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;
	int ret;

	hwdev->hw->set_config_valid(hwdev, 1);
	/* don't wait for config_valid flag if we are in config mode */
	if (hwdev->hw->in_config_mode(hwdev)) {
		atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
		return 0;
	}

	ret = wait_event_interruptible_timeout(malidp->wq,
			atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
			msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));

	return (ret > 0) ? 0 : -ETIMEDOUT;
}

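/*
 * Grab the pending vblank event, latch the new configuration into the
 * hardware (retrying config_valid if the first attempt times out) and, for
 * an inactive CRTC, deliver the event directly since the vblank interrupt
 * is disabled.
 */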
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	int loop = 5;

	malidp->event = malidp->crtc.state->event;
	malidp->crtc.state->event = NULL;

	if (malidp->crtc.state->active) {
		/*
		 * if we have an event to deliver to userspace, make sure
		 * the vblank is enabled as we are sending it from the IRQ
		 * handler.
		 */
		if (malidp->event)
			drm_crtc_vblank_get(&malidp->crtc);

		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm) < 0) {
			/*
			 * retry setting the config_valid bit, up to five
			 * more times, before giving up.
			 */
			while (loop--) {
				if (!malidp_set_and_wait_config_valid(drm))
					break;
			}
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
		}

	} else if (malidp->event) {
		/* CRTC inactive means vblank IRQ is disabled, send event directly */
		spin_lock_irq(&drm->event_lock);
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		spin_unlock_irq(&drm->event_lock);
	}
	drm_atomic_helper_commit_hw_done(state);
}

static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;
	bool fence_cookie = dma_fence_begin_signalling();

	pm_runtime_get_sync(drm->dev);

	/*
	 * set config_valid to a special value to let IRQ handlers
	 * know that we are updating registers
	 */
	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
	malidp->dev->hw->set_config_valid(malidp->dev, 0);

	drm_atomic_helper_commit_modeset_disables(drm, state);

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
		malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
		malidp_atomic_commit_se_config(crtc, old_crtc_state);
	}

	drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);

	malidp_mw_atomic_commit(drm, state);

	drm_atomic_helper_commit_modeset_enables(drm, state);

	malidp_atomic_commit_hw_done(state);

	dma_fence_end_signalling(fence_cookie);

	pm_runtime_put(drm->dev);

	drm_atomic_helper_cleanup_planes(drm, state);
}

static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
	.atomic_commit_tail = malidp_atomic_commit_tail,
};

static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
					mode_cmd->modifier[0]) == false)
		return false;

	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
		return false;
	}

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
			DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
			return false;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported AFBC block size\n");
		return false;
	}

	return true;
}

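/*
 * Check that the GEM object is big enough for the AFBC layout assumed below:
 * a header area of AFBC_HEADER_SIZE bytes per superblock, aligned as a whole
 * to AFBC_SUPERBLK_ALIGNMENT, followed by one aligned payload per superblock.
 */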
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
				    struct drm_file *file,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	int n_superblocks = 0;
	const struct drm_format_info *info;
	struct drm_gem_object *objs = NULL;
	u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
	u32 afbc_superblock_width = 0, afbc_size = 0;
	int bpp = 0;

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		afbc_superblock_height = 16;
		afbc_superblock_width = 16;
		break;
	default:
		DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
		return false;
	}

	info = drm_get_format_info(dev, mode_cmd);

	n_superblocks = (mode_cmd->width / afbc_superblock_width) *
			(mode_cmd->height / afbc_superblock_height);

	bpp = malidp_format_get_bpp(info->format);

	afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
				/ BITS_PER_BYTE;

	afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
	afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);

	if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
		DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
			      "should be same as width (=%u) * bpp (=%u)\n",
			      (mode_cmd->pitches[0] * BITS_PER_BYTE),
			      mode_cmd->width, bpp);
		return false;
	}

	objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (!objs) {
		DRM_DEBUG_KMS("Failed to lookup GEM object\n");
		return false;
	}

	if (objs->size < afbc_size) {
		DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
			      objs->size, afbc_size);
		drm_gem_object_put(objs);
		return false;
	}

	drm_gem_object_put(objs);

	return true;
}

static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
		return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);

	return false;
}

static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
		 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (mode_cmd->modifier[0]) {
		if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
			return ERR_PTR(-EINVAL);
	}

	return drm_gem_fb_create(dev, file, mode_cmd);
}

static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
	.fb_create = malidp_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int malidp_init(struct drm_device *drm)
{
	int ret;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;

	ret = drmm_mode_config_init(drm);
	if (ret)
		goto out;

	drm->mode_config.min_width = hwdev->min_line_size;
	drm->mode_config.min_height = hwdev->min_line_size;
	drm->mode_config.max_width = hwdev->max_line_size;
	drm->mode_config.max_height = hwdev->max_line_size;
	drm->mode_config.funcs = &malidp_mode_config_funcs;
	drm->mode_config.helper_private = &malidp_mode_config_helpers;

	ret = malidp_crtc_init(drm);
	if (ret)
		goto out;

	ret = malidp_mw_connector_init(drm);
	if (ret)
		goto out;

out:
	return ret;
}

static int malidp_irq_init(struct platform_device *pdev)
{
	int irq_de, irq_se, ret = 0;
	struct drm_device *drm = dev_get_drvdata(&pdev->dev);
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;

	/* fetch the interrupts from DT */
	irq_de = platform_get_irq_byname(pdev, "DE");
	if (irq_de < 0) {
		DRM_ERROR("no 'DE' IRQ specified!\n");
		return irq_de;
	}
	irq_se = platform_get_irq_byname(pdev, "SE");
	if (irq_se < 0) {
		DRM_ERROR("no 'SE' IRQ specified!\n");
		return irq_se;
	}

	ret = malidp_de_irq_init(drm, irq_de);
	if (ret)
		return ret;

	ret = malidp_se_irq_init(drm, irq_se);
	if (ret) {
		malidp_de_irq_fini(hwdev);
		return ret;
	}

	return 0;
}

DEFINE_DRM_GEM_DMA_FOPS(fops);

static int malidp_dumb_create(struct drm_file *file_priv,
			      struct drm_device *drm,
			      struct drm_mode_create_dumb *args)
{
	struct malidp_drm *malidp = drm_to_malidp(drm);
	/* allocate for the worst case scenario, i.e. rotated buffers */
	u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);

	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);

	return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}

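/* debugfs interface: expose the DE/SE error statistics and allow resetting them */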
#ifdef CONFIG_DEBUG_FS

static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
{
	error_stats->num_errors = 0;
	error_stats->last_error_status = 0;
	error_stats->last_error_vblank = -1;
}

void malidp_error(struct malidp_drm *malidp,
		  struct malidp_error_stats *error_stats, u32 status,
		  u64 vblank)
{
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	error_stats->last_error_status = status;
	error_stats->last_error_vblank = vblank;
	error_stats->num_errors++;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}

static void malidp_error_stats_dump(const char *prefix,
				    struct malidp_error_stats error_stats,
				    struct seq_file *m)
{
	seq_printf(m, "[%s] num_errors : %d\n", prefix,
		   error_stats.num_errors);
	seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
		   error_stats.last_error_status);
	seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
		   error_stats.last_error_vblank);
}

static int malidp_show_stats(struct seq_file *m, void *arg)
{
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	unsigned long irqflags;
	struct malidp_error_stats de_errors, se_errors;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	de_errors = malidp->de_errors;
	se_errors = malidp->se_errors;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	malidp_error_stats_dump("DE", de_errors, m);
	malidp_error_stats_dump("SE", se_errors, m);
	return 0;
}

static int malidp_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, malidp_show_stats, inode->i_private);
}

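/* Writing anything to the "debug" file resets both error counters. */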
static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm_to_malidp(drm);
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	return len;
}

static const struct file_operations malidp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = malidp_debugfs_open,
	.read = seq_read,
	.write = malidp_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void malidp_debugfs_init(struct drm_minor *minor)
{
	struct malidp_drm *malidp = drm_to_malidp(minor->dev);

	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_lock_init(&malidp->errors_lock);
	debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
			    minor->dev, &malidp_debugfs_fops);
}

#endif //CONFIG_DEBUG_FS

static const struct drm_driver malidp_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = malidp_debugfs_init,
#endif
	.fops = &fops,
	.name = "mali-dp",
	.desc = "ARM Mali Display Processor driver",
	.date = "20160106",
	.major = 1,
	.minor = 0,
};

static const struct of_device_id malidp_drm_of_match[] = {
	{
		.compatible = "arm,mali-dp500",
		.data = &malidp_device[MALIDP_500]
	},
	{
		.compatible = "arm,mali-dp550",
		.data = &malidp_device[MALIDP_550]
	},
	{
		.compatible = "arm,mali-dp650",
		.data = &malidp_device[MALIDP_650]
	},
	{},
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);

static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
				       const struct of_device_id *dev_id)
{
	u32 core_id;
	const char *compatstr_dp500 = "arm,mali-dp500";
	bool is_dp500;
	bool dt_is_dp500;

	/*
	 * The DP500 CORE_ID register is in a different location, so check it
	 * first. If the product id field matches, then this is DP500,
	 * otherwise check the DP550/650 CORE_ID register.
	 */
	core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
	/* Offset 0x18 will never read 0x500 on products other than DP500. */
	is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
	dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
			      sizeof(dev_id->compatible)) != NULL;
	if (is_dp500 != dt_is_dp500) {
		DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
			  dev_id->compatible, is_dp500 ? "is" : "is not");
		return false;
	} else if (!dt_is_dp500) {
		u16 product_id;
		char buf[32];

		core_id = malidp_hw_read(hwdev,
					 MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
		product_id = MALIDP_PRODUCT_ID(core_id);
		snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
		if (!strnstr(dev_id->compatible, buf,
			     sizeof(dev_id->compatible))) {
			DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
				  dev_id->compatible, product_id);
			return false;
		}
	}
	return true;
}

static bool malidp_has_sufficient_address_space(const struct resource *res,
						const struct of_device_id *dev_id)
{
	resource_size_t res_size = resource_size(res);
	const char *compatstr_dp500 = "arm,mali-dp500";

	if (!strnstr(dev_id->compatible, compatstr_dp500,
		     sizeof(dev_id->compatible)))
		return res_size >= MALIDP550_ADDR_SPACE_SIZE;
	else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
		return false;
	return true;
}

static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm_to_malidp(drm);

	return sysfs_emit(buf, "%08x\n", malidp->core_id);
}

static DEVICE_ATTR_RO(core_id);

static struct attribute *mali_dp_attrs[] = {
	&dev_attr_core_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mali_dp);

#define MAX_OUTPUT_CHANNELS	3

static int malidp_runtime_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;

	/* we can only suspend if the hardware is in config mode */
	WARN_ON(!hwdev->hw->in_config_mode(hwdev));

	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	hwdev->pm_suspended = true;
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);

	return 0;
}

static int malidp_runtime_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;

	clk_prepare_enable(hwdev->pclk);
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);
	hwdev->pm_suspended = false;
	malidp_de_irq_hw_init(hwdev);
	malidp_se_irq_hw_init(hwdev);

	return 0;
}

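/*
 * Component master bind: map the registers, acquire the clocks, verify the
 * hardware against the device-tree compatible string, initialise the KMS
 * objects, bind the encoder components and finally register the DRM device.
 */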
static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	struct of_device_id const *dev_id;
	struct drm_encoder *encoder;
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_drm_dev_alloc(dev, &malidp_driver, typeof(*malidp), base);
	if (IS_ERR(malidp))
		return PTR_ERR(malidp);

	drm = &malidp->base;

	hwdev = drmm_kzalloc(drm, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
	malidp->dev = hwdev;

	hwdev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	dev_set_drvdata(dev, drm);

	/* Enable power management */
	pm_runtime_enable(dev);

	/* Resume device to enable the clocks */
	if (pm_runtime_enabled(dev))
		pm_runtime_get_sync(dev);
	else
		malidp_runtime_pm_resume(dev);

	dev_id = of_match_device(malidp_drm_of_match, dev);
	if (!dev_id) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_has_sufficient_address_space(res, dev_id)) {
		DRM_ERROR("Insufficient address space in device-tree.\n");
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	ret = hwdev->hw->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	malidp->core_id = version;

	ret = of_property_read_u32(dev->of_node,
				   "arm,malidp-arqos-value",
				   &hwdev->arqos_value);
	if (ret)
		hwdev->arqos_value = 0x0;

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

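	/*
	 * Pack the per-channel line counts into a single OUTPUT_DEPTH value,
	 * one byte per channel (only the low 4 bits of each are used), with
	 * red ending up in the most significant position.
	 */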
	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
	hwdev->output_color_depth = out_depth;

	atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto query_hw_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	/* We expect to have a maximum of two encoders: one for the actual
	 * display and a virtual one for the writeback connector
	 */
	WARN_ON(drm->mode_config.num_encoder > 2);
	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
		encoder->possible_clones =
			(1 << drm->mode_config.num_encoder) - 1;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}
	pm_runtime_put(dev);

	drm_mode_config_reset(drm);

	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	drm_fbdev_dma_setup(drm, 32);

	return 0;

register_fail:
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
vblank_fail:
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
irq_init_fail:
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
query_hw_fail:
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	dev_set_drvdata(dev, NULL);
	of_reserved_mem_device_release(dev);

	return ret;
}

static void malidp_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
	drm_atomic_helper_shutdown(drm);
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	component_unbind_all(dev, drm);
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	dev_set_drvdata(dev, NULL);
	of_reserved_mem_device_release(dev);
}

static const struct component_master_ops malidp_master_ops = {
	.bind = malidp_bind,
	.unbind = malidp_unbind,
};

static int malidp_compare_dev(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

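/*
 * The encoder lives in a separate device, so use the component framework:
 * match against the remote node connected to the single output port and
 * defer the real setup to malidp_bind() once that component shows up.
 */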
static int malidp_platform_probe(struct platform_device *pdev)
{
	struct device_node *port;
	struct component_match *match = NULL;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* there is only one output port inside each device, find it */
	port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
	if (!port)
		return -ENODEV;

	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
				   port);
	of_node_put(port);
	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
					       match);
}

static void malidp_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &malidp_master_ops);
}

static void malidp_platform_shutdown(struct platform_device *pdev)
{
	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}

static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(drm);

	return 0;
}

static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
		malidp_runtime_pm_suspend(dev);
		pm_runtime_set_suspended(dev);
	}
	return 0;
}

static int __maybe_unused malidp_pm_resume_early(struct device *dev)
{
	malidp_runtime_pm_resume(dev);
	pm_runtime_set_active(dev);
	return 0;
}

static const struct dev_pm_ops malidp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
	SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
	SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};

static struct platform_driver malidp_platform_driver = {
	.probe = malidp_platform_probe,
	.remove_new = malidp_platform_remove,
	.shutdown = malidp_platform_shutdown,
	.driver = {
		.name = "mali-dp",
		.pm = &malidp_pm_ops,
		.of_match_table = malidp_drm_of_match,
		.dev_groups = mali_dp_groups,
	},
};

drm_module_platform_driver(malidp_platform_driver);

MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
MODULE_LICENSE("GPL v2");