// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
#include <drm/clients/drm_client_setup.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.atomic_check = msm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

struct msm_vblank_work {
	struct work_struct work;
	struct drm_crtc *crtc;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, vbl_work->crtc);
	else
		kms->funcs->disable_vblank(kms, vbl_work->crtc);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
				  struct drm_crtc *crtc, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc = crtc;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->kms->wq, &vbl_work->work);

	return 0;
}
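
/*
 * Note: drm_vblank may ask us to turn vblanks on/off from atomic context,
 * while the KMS backend's enable_vblank/disable_vblank hooks may need to
 * sleep (e.g. register access that wants clocks/runtime PM).  Hence the
 * GFP_ATOMIC allocation and the bounce to the kms workqueue above.
 */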
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	return vblank_ctrl_queue_work(priv, crtc, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	vblank_ctrl_queue_work(priv, crtc, false);
}

static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
	struct msm_kms *kms = arg;

	if (atomic_read(&kms->fault_snapshot_capture) == 0) {
		msm_disp_snapshot_state(kms->dev);
		atomic_inc(&kms->fault_snapshot_capture);
	}

	return -ENOSYS;
}

struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev)
{
	struct drm_gpuvm *vm;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else if (mdss_dev && device_iommu_mapped(mdss_dev))
		iommu_dev = mdss_dev;
	else {
		drm_info(dev, "no IOMMU, bailing out\n");
		return ERR_PTR(-ENODEV);
	}

	mmu = msm_iommu_disp_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
			       0x1000, 0x100000000 - 0x1000, true);
	if (IS_ERR(vm)) {
		dev_err(mdp_dev, "vm create, error %pe\n", vm);
		mmu->funcs->destroy(mmu);
		return vm;
	}

	msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);

	return vm;
}

void msm_drm_kms_unregister(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	drm_atomic_helper_shutdown(ddev);
}
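
/*
 * Teardown counterpart of msm_drm_kms_init(): flush any pending vblank
 * enable/disable work, stop the per-CRTC event workers, then tear down
 * polling, the snapshot infrastructure and the IRQ before asking the
 * backend to destroy the kms object.
 */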
void msm_drm_kms_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	BUG_ON(!kms);

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(kms->wq);

	/* clean up event worker threads */
	for (i = 0; i < MAX_CRTCS; i++) {
		if (kms->event_thread[i].worker)
			kthread_destroy_worker(kms->event_thread[i].worker);
	}

	drm_kms_helper_poll_fini(ddev);

	msm_disp_snapshot_destroy(ddev);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);
}

int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *crtc;
	int ret;

	/* the fw fb could be anywhere in memory */
	ret = aperture_remove_all_conflicting_devices(drv->name);
	if (ret)
		return ret;

	ret = msm_disp_snapshot_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
		return ret;
	}

	ret = priv->kms_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	kms->dev = ddev;
	ret = kms->funcs->hw_init(kms);
	if (ret) {
		DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
		goto err_msm_uninit;
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	drm_for_each_crtc(crtc, ddev) {
		struct msm_drm_thread *ev_thread;

		/* initialize event thread */
		ev_thread = &kms->event_thread[drm_crtc_index(crtc)];
		ev_thread->dev = ddev;
		ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
		if (IS_ERR(ev_thread->worker)) {
			ret = PTR_ERR(ev_thread->worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			ev_thread->worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(ev_thread->worker->task);
	}

	ret = drm_vblank_init(ddev, ddev->mode_config.num_crtc);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	pm_runtime_get_sync(dev);
	ret = msm_irq_install(ddev, kms->irq);
	pm_runtime_put_sync(dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
		goto err_msm_uninit;
	}

	drm_mode_config_reset(ddev);

	return 0;

err_msm_uninit:
	return ret;
}
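
/*
 * System PM hooks: prepare suspends the display pipeline through the
 * drm_mode_config helpers and complete resumes it.  Both bail out early
 * when KMS never probed (e.g. GPU-only configurations).
 */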
int msm_kms_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_kms_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

void msm_kms_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}

void msm_drm_kms_post_init(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	drm_kms_helper_poll_init(ddev);
	drm_client_setup(ddev, NULL);
}