1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
5 */
6
7 #include <linux/clk.h>
8 #include <linux/module.h>
9 #include <linux/of_platform.h>
10 #include <linux/platform_device.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/remoteproc.h>
13 #include <linux/remoteproc/mtk_scp.h>
14 #include <media/videobuf2-dma-contig.h>
15
16 #include "mtk-mdp3-core.h"
17 #include "mtk-mdp3-cfg.h"
18 #include "mtk-mdp3-m2m.h"
19
/*
 * OF match table. Several distinct RDMA/WROT nodes bind to this driver;
 * the per-SoC driver data selects configuration tables, and mdp_probe()
 * later decides which bound node is the "main" MDP device.
 */
static const struct of_device_id mdp_of_ids[] = {
	{ .compatible = "mediatek,mt8183-mdp3-rdma",
	  .data = &mt8183_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8188-mdp3-rdma",
	  .data = &mt8188_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8195-mdp3-rdma",
	  .data = &mt8195_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8195-mdp3-wrot",
	  .data = &mt8195_mdp_driver_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdp_of_ids);
36
__get_pdev_by_id(struct platform_device * pdev,struct platform_device * from,enum mdp_infra_id id)37 static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
38 struct platform_device *from,
39 enum mdp_infra_id id)
40 {
41 struct device_node *node, *f = NULL;
42 struct platform_device *mdp_pdev = NULL;
43 const struct mtk_mdp_driver_data *mdp_data;
44 const char *compat;
45
46 if (!pdev)
47 return NULL;
48
49 if (id < MDP_INFRA_MMSYS || id >= MDP_INFRA_MAX) {
50 dev_err(&pdev->dev, "Illegal infra id %d\n", id);
51 return NULL;
52 }
53
54 mdp_data = of_device_get_match_data(&pdev->dev);
55 if (!mdp_data) {
56 dev_err(&pdev->dev, "have no driver data to find node\n");
57 return NULL;
58 }
59
60 compat = mdp_data->mdp_probe_infra[id].compatible;
61 if (strlen(compat) == 0)
62 return NULL;
63
64 if (from)
65 f = from->dev.of_node;
66 node = of_find_compatible_node(f, NULL, compat);
67 if (WARN_ON(!node)) {
68 dev_err(&pdev->dev, "find node from id %d failed\n", id);
69 return NULL;
70 }
71
72 mdp_pdev = of_find_device_by_node(node);
73 of_node_put(node);
74 if (WARN_ON(!mdp_pdev)) {
75 dev_err(&pdev->dev, "find pdev from id %d failed\n", id);
76 return NULL;
77 }
78
79 return mdp_pdev;
80 }
81
mdp_vpu_get_locked(struct mdp_dev * mdp)82 int mdp_vpu_get_locked(struct mdp_dev *mdp)
83 {
84 int ret = 0;
85
86 if (mdp->vpu_count++ == 0) {
87 ret = rproc_boot(mdp->rproc_handle);
88 if (ret) {
89 dev_err(&mdp->pdev->dev,
90 "vpu_load_firmware failed %d\n", ret);
91 goto err_load_vpu;
92 }
93 ret = mdp_vpu_register(mdp);
94 if (ret) {
95 dev_err(&mdp->pdev->dev,
96 "mdp_vpu register failed %d\n", ret);
97 goto err_reg_vpu;
98 }
99 ret = mdp_vpu_dev_init(&mdp->vpu, mdp->scp, &mdp->vpu_lock);
100 if (ret) {
101 dev_err(&mdp->pdev->dev,
102 "mdp_vpu device init failed %d\n", ret);
103 goto err_init_vpu;
104 }
105 }
106 return 0;
107
108 err_init_vpu:
109 mdp_vpu_unregister(mdp);
110 err_reg_vpu:
111 err_load_vpu:
112 mdp->vpu_count--;
113 return ret;
114 }
115
mdp_vpu_put_locked(struct mdp_dev * mdp)116 void mdp_vpu_put_locked(struct mdp_dev *mdp)
117 {
118 if (--mdp->vpu_count == 0) {
119 mdp_vpu_dev_deinit(&mdp->vpu);
120 mdp_vpu_unregister(mdp);
121 }
122 }
123
/*
 * Release callback of the MDP video device; runs when the last reference
 * to the video device is dropped. Undoes, roughly in reverse order, what
 * mdp_probe() set up, and finally frees the mdp_dev itself.
 */
void mdp_video_device_release(struct video_device *vdev)
{
	struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev);
	int i;

	/* One CMDQ mailbox client may exist per parallel pipe in use. */
	for (i = 0; i < mdp->mdp_data->pp_used; i++)
		if (mdp->cmdq_clt[i])
			cmdq_mbox_destroy(mdp->cmdq_clt[i]);

	scp_put(mdp->scp);

	destroy_workqueue(mdp->job_wq);
	destroy_workqueue(mdp->clock_wq);

	pm_runtime_disable(&mdp->pdev->dev);

	vb2_dma_contig_clear_max_seg_size(&mdp->pdev->dev);

	mdp_comp_destroy(mdp);
	/*
	 * Return every MDP mutex handle acquired at probe time; entries can
	 * be NULL or an ERR_PTR if probe bailed out early, hence the guard.
	 */
	for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
		enum mdp_mm_subsys_id idx;
		struct mtk_mutex *m;
		u32 m_id;

		idx = mdp->mdp_data->pipe_info[i].sub_id;
		m_id = mdp->mdp_data->pipe_info[i].mutex_id;
		m = mdp->mm_subsys[idx].mdp_mutex[m_id];
		if (!IS_ERR_OR_NULL(m))
			mtk_mutex_put(m);
	}

	mdp_vpu_shared_mem_free(&mdp->vpu);
	v4l2_m2m_release(mdp->m2m_dev);
	kfree(mdp);
}
159
/* devm action callback: drop the device reference taken during deploy. */
static void mdp_put_device(void *_dev)
{
	put_device((struct device *)_dev);
}
166
/*
 * Resolve the per-subsystem MMSYS or MUTEX device for every multimedia
 * subsystem this chip has, starting at infra id @id (one consecutive id
 * per subsystem). Each resolved device keeps a reference, released via a
 * devm action when this driver's device goes away.
 *
 * Returns 0 on success or a negative errno.
 */
static int mdp_mm_subsys_deploy(struct mdp_dev *mdp, enum mdp_infra_id id)
{
	struct platform_device *mm_pdev = NULL;
	struct device **dev;
	int ret;
	int i;

	if (!mdp)
		return -EINVAL;

	for (i = 0; i < MDP_MM_SUBSYS_MAX; i++) {
		const char *compat;
		/* Infra ids for subsystem i are laid out as id + i. */
		enum mdp_infra_id sub_id = id + i;

		switch (id) {
		case MDP_INFRA_MMSYS:
			dev = &mdp->mm_subsys[i].mmsys;
			break;
		case MDP_INFRA_MUTEX:
			dev = &mdp->mm_subsys[i].mutex;
			break;
		default:
			dev_err(&mdp->pdev->dev, "Unknown infra id %d", id);
			return -EINVAL;
		}

		/*
		 * Not every chip has multiple multimedia subsystems, so
		 * the config may be null.
		 */
		compat = mdp->mdp_data->mdp_probe_infra[sub_id].compatible;
		if (strlen(compat) == 0)
			continue;

		/*
		 * mm_pdev doubles as a search cursor: the previous hit is
		 * handed back as the "from" node for the next lookup.
		 */
		mm_pdev = __get_pdev_by_id(mdp->pdev, mm_pdev, sub_id);
		if (WARN_ON(!mm_pdev))
			return -ENODEV;

		/* Release the reference __get_pdev_by_id() took on unbind. */
		ret = devm_add_action_or_reset(&mdp->pdev->dev, mdp_put_device,
					       &mm_pdev->dev);
		if (ret)
			return ret;

		*dev = &mm_pdev->dev;
	}

	return 0;
}
215
mdp_probe(struct platform_device * pdev)216 static int mdp_probe(struct platform_device *pdev)
217 {
218 struct device *dev = &pdev->dev;
219 struct mdp_dev *mdp;
220 struct platform_device *mm_pdev;
221 struct resource *res;
222 int ret, i, mutex_id;
223
224 mdp = kzalloc_obj(*mdp);
225 if (!mdp) {
226 ret = -ENOMEM;
227 goto err_return;
228 }
229
230 mdp->pdev = pdev;
231 mdp->mdp_data = of_device_get_match_data(&pdev->dev);
232
233 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
234 if (res->start != mdp->mdp_data->mdp_con_res) {
235 platform_set_drvdata(pdev, mdp);
236 goto success_return;
237 }
238
239 ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MMSYS);
240 if (ret)
241 goto err_destroy_device;
242
243 ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MUTEX);
244 if (ret)
245 goto err_destroy_device;
246
247 for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
248 enum mdp_mm_subsys_id idx;
249 struct mtk_mutex **m;
250
251 idx = mdp->mdp_data->pipe_info[i].sub_id;
252 mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
253 m = &mdp->mm_subsys[idx].mdp_mutex[mutex_id];
254
255 if (!IS_ERR_OR_NULL(*m))
256 continue;
257
258 *m = mtk_mutex_get(mdp->mm_subsys[idx].mutex);
259 if (IS_ERR(*m)) {
260 ret = PTR_ERR(*m);
261 goto err_free_mutex;
262 }
263 }
264
265 ret = mdp_comp_config(mdp);
266 if (ret) {
267 dev_err(dev, "Failed to config mdp components\n");
268 goto err_free_mutex;
269 }
270
271 mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME,
272 WQ_FREEZABLE | WQ_PERCPU, 0);
273 if (!mdp->job_wq) {
274 dev_err(dev, "Unable to create job workqueue\n");
275 ret = -ENOMEM;
276 goto err_deinit_comp;
277 }
278
279 mdp->clock_wq = alloc_workqueue(MDP_MODULE_NAME "-clock",
280 WQ_FREEZABLE | WQ_PERCPU,
281 0);
282 if (!mdp->clock_wq) {
283 dev_err(dev, "Unable to create clock workqueue\n");
284 ret = -ENOMEM;
285 goto err_destroy_job_wq;
286 }
287
288 mdp->scp = scp_get(pdev);
289 if (!mdp->scp) {
290 mm_pdev = __get_pdev_by_id(pdev, NULL, MDP_INFRA_SCP);
291 if (WARN_ON(!mm_pdev)) {
292 dev_err(&pdev->dev, "Could not get scp device\n");
293 ret = -ENODEV;
294 goto err_destroy_clock_wq;
295 }
296 mdp->scp = platform_get_drvdata(mm_pdev);
297 put_device(&mm_pdev->dev);
298 }
299
300 mdp->rproc_handle = scp_get_rproc(mdp->scp);
301 dev_dbg(&pdev->dev, "MDP rproc_handle: %p", mdp->rproc_handle);
302
303 mutex_init(&mdp->vpu_lock);
304 mutex_init(&mdp->m2m_lock);
305
306 for (i = 0; i < mdp->mdp_data->pp_used; i++) {
307 mdp->cmdq_clt[i] = cmdq_mbox_create(dev, i);
308 if (IS_ERR(mdp->cmdq_clt[i])) {
309 ret = PTR_ERR(mdp->cmdq_clt[i]);
310 goto err_mbox_destroy;
311 }
312
313 mdp->cmdq_shift_pa[i] = cmdq_get_shift_pa(mdp->cmdq_clt[i]->chan);
314 }
315
316 init_waitqueue_head(&mdp->callback_wq);
317 ida_init(&mdp->mdp_ida);
318 platform_set_drvdata(pdev, mdp);
319
320 vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
321
322 ret = v4l2_device_register(dev, &mdp->v4l2_dev);
323 if (ret) {
324 dev_err(dev, "Failed to register v4l2 device\n");
325 ret = -EINVAL;
326 goto err_mbox_destroy;
327 }
328
329 ret = mdp_m2m_device_register(mdp);
330 if (ret) {
331 v4l2_err(&mdp->v4l2_dev, "Failed to register m2m device\n");
332 goto err_unregister_device;
333 }
334
335 success_return:
336 dev_dbg(dev, "mdp-%d registered successfully\n", pdev->id);
337 return 0;
338
339 err_unregister_device:
340 v4l2_device_unregister(&mdp->v4l2_dev);
341 err_mbox_destroy:
342 while (--i >= 0)
343 cmdq_mbox_destroy(mdp->cmdq_clt[i]);
344 scp_put(mdp->scp);
345 err_destroy_clock_wq:
346 destroy_workqueue(mdp->clock_wq);
347 err_destroy_job_wq:
348 destroy_workqueue(mdp->job_wq);
349 err_deinit_comp:
350 mdp_comp_destroy(mdp);
351 err_free_mutex:
352 for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
353 enum mdp_mm_subsys_id idx;
354 struct mtk_mutex *m;
355
356 idx = mdp->mdp_data->pipe_info[i].sub_id;
357 mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
358 m = mdp->mm_subsys[idx].mdp_mutex[mutex_id];
359 if (!IS_ERR_OR_NULL(m))
360 mtk_mutex_put(m);
361 }
362 err_destroy_device:
363 kfree(mdp);
364 err_return:
365 dev_dbg(dev, "Errno %d\n", ret);
366 return ret;
367 }
368
/*
 * Driver unbind. Only unregisters the V4L2 device here; the bulk of the
 * teardown happens in mdp_video_device_release() once the last reference
 * to the video device is dropped.
 */
static void mdp_remove(struct platform_device *pdev)
{
	struct mdp_dev *mdp = platform_get_drvdata(pdev);

	v4l2_device_unregister(&mdp->v4l2_dev);

	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
}
377
mdp_suspend(struct device * dev)378 static int __maybe_unused mdp_suspend(struct device *dev)
379 {
380 struct mdp_dev *mdp = dev_get_drvdata(dev);
381 int ret;
382
383 atomic_set(&mdp->suspended, 1);
384
385 if (refcount_read(&mdp->job_count)) {
386 ret = wait_event_timeout(mdp->callback_wq,
387 !refcount_read(&mdp->job_count),
388 2 * HZ);
389 if (ret == 0) {
390 dev_err(dev,
391 "%s:flushed cmdq task incomplete, count=%d\n",
392 __func__, refcount_read(&mdp->job_count));
393 return -EBUSY;
394 }
395 }
396
397 return 0;
398 }
399
/*
 * System resume hook: clear the suspended flag so new jobs are accepted
 * again (counterpart of mdp_suspend()).
 */
static int __maybe_unused mdp_resume(struct device *dev)
{
	struct mdp_dev *mdp = dev_get_drvdata(dev);

	atomic_set(&mdp->suspended, 0);

	return 0;
}
408
/* System sleep PM callbacks (no runtime-PM callbacks are provided). */
static const struct dev_pm_ops mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdp_suspend, mdp_resume)
};
412
/* Platform driver glue; registered/unregistered by module init/exit. */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
	.driver = {
		.name	= MDP_MODULE_NAME,
		.pm	= &mdp_pm_ops,
		.of_match_table = mdp_of_ids,
	},
};

module_platform_driver(mdp_driver);

MODULE_AUTHOR("Ping-Hsun Wu <ping-hsun.wu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek image processor 3 driver");
MODULE_LICENSE("GPL");
428