// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
#include <drm/clients/drm_client_setup.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.atomic_check = msm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

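/*
 * IRQ handling: these helpers wrap the KMS backend's irq_preinstall/irq/
 * irq_postinstall/irq_uninstall hooks, so the backend (MDP4/MDP5/DPU) owns
 * the actual interrupt servicing.
 */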
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

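/*
 * Request the KMS interrupt line, bracketed by the backend's pre/post
 * install hooks.  IRQ_NOTCONNECTED means the interrupt line is not actually
 * wired up, so bail out with -ENOTCONN instead of requesting it.
 */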
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

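/*
 * Vblank control: the drm_crtc_funcs .enable_vblank/.disable_vblank hooks
 * can be called from atomic context, while the KMS backend hooks may need
 * to sleep (clocks, register access).  Defer the actual enable/disable to a
 * work item on the kms workqueue; the GFP_ATOMIC allocation below exists
 * for the same reason.
 */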
struct msm_vblank_work {
	struct work_struct work;
	struct drm_crtc *crtc;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, vbl_work->crtc);
	else
		kms->funcs->disable_vblank(kms, vbl_work->crtc);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
				  struct drm_crtc *crtc, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc = crtc;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->kms->wq, &vbl_work->work);

	return 0;
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	return vblank_ctrl_queue_work(priv, crtc, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

	vblank_ctrl_queue_work(priv, crtc, false);
}

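/*
 * IOMMU fault handler for the display VM: capture a single display snapshot
 * on the first fault, then return -ENOSYS (fault not handled) so the normal
 * IOMMU fault reporting still kicks in.
 */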
static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
	struct msm_kms *kms = arg;

	if (atomic_read(&kms->fault_snapshot_capture) == 0) {
		msm_disp_snapshot_state(kms->dev);
		atomic_inc(&kms->fault_snapshot_capture);
	}

	return -ENOSYS;
}

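/*
 * Create the VM (address space) used for scanout buffers.  The range starts
 * at 0x1000 and spans up to 4 GiB, so iova 0 is never handed out.
 */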
struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
{
	struct drm_gpuvm *vm;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct device *iommu_dev;

	/*
	 * The IOMMU can be declared either in the MDSS device tree node or in
	 * the MDP/DPU node, so pick whichever device actually has an IOMMU
	 * attached.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_disp_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
			       0x1000, 0x100000000 - 0x1000, true);
	if (IS_ERR(vm)) {
		dev_err(mdp_dev, "vm create, error %pe\n", vm);
		mmu->funcs->destroy(mmu);
		return vm;
	}

	msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);

	return vm;
}

void msm_drm_kms_unregister(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	drm_atomic_helper_shutdown(ddev);
}

void msm_drm_kms_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	BUG_ON(!kms);

	/*
	 * We must cancel and clean up any pending vblank enable/disable work
	 * before msm_irq_uninstall(), to avoid a work item re-enabling an irq
	 * after uninstall has disabled it.
	 */
	flush_workqueue(kms->wq);

	/* clean up event worker threads */
	for (i = 0; i < MAX_CRTCS; i++) {
		if (kms->event_thread[i].worker)
			kthread_destroy_worker(kms->event_thread[i].worker);
	}

	drm_kms_helper_poll_fini(ddev);

	msm_disp_snapshot_destroy(ddev);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);
}

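/*
 * Bring up the KMS side of the device: take over the firmware framebuffer,
 * set up display snapshotting, let the backend initialize and touch the
 * hardware, start one event kthread per CRTC, then wire up vblank and the
 * display IRQ.
 */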
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *crtc;
	int ret;

	/* the fw fb could be anywhere in memory */
	ret = aperture_remove_all_conflicting_devices(drv->name);
	if (ret)
		return ret;

	ret = msm_disp_snapshot_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
		return ret;
	}

	ret = priv->kms_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	kms->dev = ddev;
	ret = kms->funcs->hw_init(kms);
	if (ret) {
		DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
		goto err_msm_uninit;
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	drm_for_each_crtc(crtc, ddev) {
		struct msm_drm_thread *ev_thread;

		/* initialize event thread */
		ev_thread = &kms->event_thread[drm_crtc_index(crtc)];
		ev_thread->dev = ddev;
		ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
		if (IS_ERR(ev_thread->worker)) {
			ret = PTR_ERR(ev_thread->worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			ev_thread->worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(ev_thread->worker->task);
	}

	ret = drm_vblank_init(ddev, ddev->mode_config.num_crtc);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	pm_runtime_get_sync(dev);
	ret = msm_irq_install(ddev, kms->irq);
	pm_runtime_put_sync(dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
		goto err_msm_uninit;
	}

	drm_mode_config_reset(ddev);

	return 0;

err_msm_uninit:
	return ret;
}

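/*
 * System PM: suspend/resume the whole mode config through the DRM helpers.
 * Both callbacks tolerate being invoked before KMS has been set up.
 */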
int msm_kms_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_kms_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

void msm_kms_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shut down the hw if we're far enough along that things might be on.
	 * If we run this too early, we'll end up panicking in any number of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}

void msm_drm_kms_post_init(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	drm_kms_helper_poll_init(ddev);
	drm_client_setup(ddev, NULL);
}