xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c (revision 512f9f150f367176fa9e5f4613b4863409a6f686)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
5  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
6  *
7  * Author: Rob Clark <robdclark@gmail.com>
8  */
9 
10 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
11 
12 #include <linux/debugfs.h>
13 #include <linux/dma-buf.h>
14 #include <linux/of_irq.h>
15 #include <linux/pm_opp.h>
16 
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_file.h>
19 #include <drm/drm_framebuffer.h>
20 #include <drm/drm_vblank.h>
21 #include <drm/drm_writeback.h>
22 
23 #include <linux/soc/qcom/ubwc.h>
24 
25 #include "msm_drv.h"
26 #include "msm_mmu.h"
27 #include "msm_gem.h"
28 #include "disp/msm_disp_snapshot.h"
29 
30 #include "dpu_core_irq.h"
31 #include "dpu_crtc.h"
32 #include "dpu_encoder.h"
33 #include "dpu_formats.h"
34 #include "dpu_hw_vbif.h"
35 #include "dpu_kms.h"
36 #include "dpu_plane.h"
37 #include "dpu_vbif.h"
38 #include "dpu_writeback.h"
39 
40 #define CREATE_TRACE_POINTS
41 #include "dpu_trace.h"
42 
43 /*
44  * To enable overall DRM driver logging
45  * # echo 0x2 > /sys/module/drm/parameters/debug
46  *
47  * To enable DRM driver h/w logging
48  * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
49  *
50  * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
51  */
52 #define DPU_DEBUGFS_DIR "msm_dpu"
53 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
54 
55 bool dpu_use_virtual_planes = true;
56 module_param(dpu_use_virtual_planes, bool, 0);
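
/*
 * Example (a sketch, assuming DPU is built into msm.ko as in the usual
 * configuration): virtual planes can be disabled at boot time from the
 * kernel command line:
 *
 *   msm.dpu_use_virtual_planes=0
 */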
57 
58 static int dpu_kms_hw_init(struct msm_kms *kms);
59 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
60 
61 #ifdef CONFIG_DEBUG_FS
62 static int _dpu_danger_signal_status(struct seq_file *s,
63 		bool danger_status)
64 {
65 	struct dpu_danger_safe_status status;
66 	struct dpu_kms *kms = s->private;
67 	int i;
68 
69 	if (!kms->hw_mdp) {
70 		DPU_ERROR("invalid arg(s)\n");
71 		return 0;
72 	}
73 
74 	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
75 
76 	pm_runtime_get_sync(&kms->pdev->dev);
77 	if (danger_status) {
78 		seq_puts(s, "\nDanger signal status:\n");
79 		if (kms->hw_mdp->ops.get_danger_status)
80 			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
81 					&status);
82 	} else {
83 		seq_puts(s, "\nSafe signal status:\n");
84 		if (kms->hw_mdp->ops.get_safe_status)
85 			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
86 					&status);
87 	}
88 	pm_runtime_put_sync(&kms->pdev->dev);
89 
90 	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
91 
92 	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
93 		seq_printf(s, "SSPP%d   :  0x%x\n", i - SSPP_VIG0,
94 				status.sspp[i]);
95 	seq_puts(s, "\n");
96 
97 	return 0;
98 }
99 
100 static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
101 {
102 	return _dpu_danger_signal_status(s, true);
103 }
104 DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
105 
106 static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
107 {
108 	return _dpu_danger_signal_status(s, false);
109 }
110 DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
111 
112 static ssize_t _dpu_plane_danger_read(struct file *file,
113 			char __user *buff, size_t count, loff_t *ppos)
114 {
115 	struct dpu_kms *kms = file->private_data;
116 	int len;
117 	char buf[40];
118 
119 	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
120 
121 	return simple_read_from_buffer(buff, count, ppos, buf, len);
122 }
123 
124 static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
125 {
126 	struct drm_plane *plane;
127 
128 	drm_for_each_plane(plane, kms->dev) {
129 		if (plane->fb && plane->state) {
130 			dpu_plane_danger_signal_ctrl(plane, enable);
131 			DPU_DEBUG("plane:%d img:%dx%d ",
132 				plane->base.id, plane->fb->width,
133 				plane->fb->height);
134 			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
135 				plane->state->src_x >> 16,
136 				plane->state->src_y >> 16,
137 				plane->state->src_w >> 16,
138 				plane->state->src_h >> 16,
139 				plane->state->crtc_x, plane->state->crtc_y,
140 				plane->state->crtc_w, plane->state->crtc_h);
141 		} else {
142 			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
143 		}
144 	}
145 }
146 
147 static ssize_t _dpu_plane_danger_write(struct file *file,
148 		    const char __user *user_buf, size_t count, loff_t *ppos)
149 {
150 	struct dpu_kms *kms = file->private_data;
151 	unsigned int disable_panic;
152 	int ret;
153 
154 	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
155 	if (ret)
156 		return ret;
157 
158 	if (disable_panic) {
159 		/* Disable panic signal for all active pipes */
160 		DPU_DEBUG("Disabling danger:\n");
161 		_dpu_plane_set_danger_state(kms, false);
162 		kms->has_danger_ctrl = false;
163 	} else {
164 		/* Enable panic signal for all active pipes */
165 		DPU_DEBUG("Enabling danger:\n");
166 		kms->has_danger_ctrl = true;
167 		_dpu_plane_set_danger_state(kms, true);
168 	}
169 
170 	return count;
171 }
172 
173 static const struct file_operations dpu_plane_danger_enable = {
174 	.open = simple_open,
175 	.read = _dpu_plane_danger_read,
176 	.write = _dpu_plane_danger_write,
177 };
178 
179 static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
180 		struct dentry *parent)
181 {
182 	struct dentry *entry = debugfs_create_dir("danger", parent);
183 
184 	debugfs_create_file("danger_status", 0600, entry,
185 			dpu_kms, &dpu_debugfs_danger_stats_fops);
186 	debugfs_create_file("safe_status", 0600, entry,
187 			dpu_kms, &dpu_debugfs_safe_stats_fops);
188 	debugfs_create_file("disable_danger", 0600, entry,
189 			dpu_kms, &dpu_plane_danger_enable);
190 
191 }
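
/*
 * Usage sketch (the dri/0 minor index is only an example): the files created
 * above end up under the per-device "debug" directory created in
 * dpu_kms_debugfs_init(), e.g.
 *
 *   # cat /sys/kernel/debug/dri/0/debug/danger/danger_status
 *   # echo 1 > /sys/kernel/debug/dri/0/debug/danger/disable_danger
 */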
192 
193 /*
194  * Companion structure for dpu_debugfs_create_regset32.
195  */
196 struct dpu_debugfs_regset32 {
197 	uint32_t offset;
198 	uint32_t blk_len;
199 	struct dpu_kms *dpu_kms;
200 };
201 
202 static int dpu_regset32_show(struct seq_file *s, void *data)
203 {
204 	struct dpu_debugfs_regset32 *regset = s->private;
205 	struct dpu_kms *dpu_kms = regset->dpu_kms;
206 	void __iomem *base;
207 	uint32_t i, addr;
208 
209 	if (!dpu_kms->mmio)
210 		return 0;
211 
212 	base = dpu_kms->mmio + regset->offset;
213 
214 	/* insert padding spaces, if needed */
215 	if (regset->offset & 0xF) {
216 		seq_printf(s, "[%x]", regset->offset & ~0xF);
217 		for (i = 0; i < (regset->offset & 0xF); i += 4)
218 			seq_puts(s, "         ");
219 	}
220 
221 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
222 
223 	/* main register output */
224 	for (i = 0; i < regset->blk_len; i += 4) {
225 		addr = regset->offset + i;
226 		if ((addr & 0xF) == 0x0)
227 			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
228 		seq_printf(s, " %08x", readl_relaxed(base + i));
229 	}
230 	seq_puts(s, "\n");
231 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
232 
233 	return 0;
234 }
235 DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
236 
237 /**
238  * dpu_debugfs_create_regset32 - Create register read back file for debugfs
239  *
240  * This function is almost identical to the standard debugfs_create_regset32()
241  * function, with the main difference being that a list of register
242  * names/offsets does not need to be provided. The 'read' function simply outputs
243  * sequential register values over a specified range.
244  *
245  * @name:   File name within debugfs
246  * @mode:   File mode within debugfs
247  * @parent: Parent directory entry within debugfs, can be NULL
248  * @offset: sub-block offset
249  * @length: sub-block length, in bytes
250  * @dpu_kms: pointer to dpu kms structure
251  */
252 void dpu_debugfs_create_regset32(const char *name, umode_t mode,
253 		void *parent,
254 		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
255 {
256 	struct dpu_debugfs_regset32 *regset;
257 
258 	if (WARN_ON(!name || !dpu_kms || !length))
259 		return;
260 
261 	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
262 	if (!regset)
263 		return;
264 
265 	/* make sure offset is a multiple of 4 */
266 	regset->offset = round_down(offset, 4);
267 	regset->blk_len = length;
268 	regset->dpu_kms = dpu_kms;
269 
270 	debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
271 }
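
/*
 * Usage sketch for dpu_debugfs_create_regset32(): callers typically pass a
 * sub-block offset/length taken from the hardware catalog; the name, mode and
 * catalog entry below are purely illustrative:
 *
 *   dpu_debugfs_create_regset32("intf0", 0400, parent,
 *                               cat->intf[0].base, cat->intf[0].len, dpu_kms);
 */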
272 
273 static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
274 {
275 	struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
276 	int i;
277 
278 	if (IS_ERR(entry))
279 		return;
280 
281 	for (i = SSPP_NONE; i < SSPP_MAX; i++) {
282 		struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);
283 
284 		if (!hw)
285 			continue;
286 
287 		_dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
288 	}
289 }
290 
291 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
292 {
293 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
294 	void *p = dpu_hw_util_get_log_mask_ptr();
295 	struct dentry *entry;
296 
297 	if (!p)
298 		return -EINVAL;
299 
300 	/* Only create a set of debugfs files for the primary node, ignore render nodes */
301 	if (minor->type != DRM_MINOR_PRIMARY)
302 		return 0;
303 
304 	entry = debugfs_create_dir("debug", minor->debugfs_root);
305 
306 	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
307 
308 	dpu_debugfs_danger_init(dpu_kms, entry);
309 	dpu_debugfs_vbif_init(dpu_kms, entry);
310 	dpu_debugfs_core_irq_init(dpu_kms, entry);
311 	dpu_debugfs_sspp_init(dpu_kms, entry);
312 
313 	return dpu_core_perf_debugfs_init(dpu_kms, entry);
314 }
315 #endif
316 
317 /* Global/shared object state funcs */
318 
319 /*
320  * This is a helper that returns the private state currently in operation.
321  * Note that this would return the "old_state" if called in the atomic check
322  * path, and the "new_state" after the atomic swap has been done.
323  */
324 struct dpu_global_state *
325 dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
326 {
327 	return to_dpu_global_state(dpu_kms->global_state.state);
328 }
329 
330 /*
331  * This acquires the modeset lock set aside for global state and creates
332  * a new duplicated private object state.
333  */
334 struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
335 {
336 	struct msm_drm_private *priv = s->dev->dev_private;
337 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
338 	struct drm_private_state *priv_state;
339 
340 	priv_state = drm_atomic_get_private_obj_state(s,
341 						&dpu_kms->global_state);
342 	if (IS_ERR(priv_state))
343 		return ERR_CAST(priv_state);
344 
345 	return to_dpu_global_state(priv_state);
346 }
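
/*
 * Typical atomic-check usage (a sketch): fetch the global state from the
 * in-flight atomic state and handle the ERR_PTR case, e.g.
 *
 *   global_state = dpu_kms_get_global_state(state);
 *   if (IS_ERR(global_state))
 *           return PTR_ERR(global_state);
 */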
347 
348 static struct drm_private_state *
349 dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
350 {
351 	struct dpu_global_state *state;
352 
353 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
354 	if (!state)
355 		return NULL;
356 
357 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
358 
359 	return &state->base;
360 }
361 
362 static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
363 				      struct drm_private_state *state)
364 {
365 	struct dpu_global_state *dpu_state = to_dpu_global_state(state);
366 
367 	kfree(dpu_state);
368 }
369 
370 static struct drm_private_state *
371 dpu_kms_global_create_state(struct drm_private_obj *obj)
372 {
373 	struct drm_device *dev = obj->dev;
374 	struct msm_drm_private *priv = dev->dev_private;
375 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
376 	struct dpu_global_state *dpu_state;
377 
378 	dpu_state = kzalloc(sizeof(*dpu_state), GFP_KERNEL);
379 	if (!dpu_state)
380 		return ERR_PTR(-ENOMEM);
381 
382 	__drm_atomic_helper_private_obj_create_state(obj, &dpu_state->base);
383 	dpu_state->rm = &dpu_kms->rm;
384 
385 	return &dpu_state->base;
386 }
387 
388 static void dpu_kms_global_print_state(struct drm_printer *p,
389 				       const struct drm_private_state *state)
390 {
391 	const struct dpu_global_state *global_state = to_dpu_global_state(state);
392 
393 	dpu_rm_print_state(p, global_state);
394 }
395 
396 static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
397 	.atomic_create_state = dpu_kms_global_create_state,
398 	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
399 	.atomic_destroy_state = dpu_kms_global_destroy_state,
400 	.atomic_print_state = dpu_kms_global_print_state,
401 };
402 
403 static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
404 {
405 	drm_atomic_private_obj_fini(&dpu_kms->global_state);
406 }
407 
408 static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
409 {
410 	struct icc_path *path0;
411 	struct icc_path *path1;
412 	struct device *dpu_dev = &dpu_kms->pdev->dev;
413 
414 	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
415 	path1 = msm_icc_get(dpu_dev, "mdp1-mem");
416 
417 	if (IS_ERR_OR_NULL(path0))
418 		return PTR_ERR_OR_ZERO(path0);
419 
420 	dpu_kms->path[0] = path0;
421 	dpu_kms->num_paths = 1;
422 
423 	if (!IS_ERR_OR_NULL(path1)) {
424 		dpu_kms->path[1] = path1;
425 		dpu_kms->num_paths++;
426 	}
427 	return 0;
428 }
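
/*
 * "mdp0-mem" and "mdp1-mem" above are looked up as interconnect-names in the
 * DPU device-tree node. A sketch of the expected wiring (provider phandles
 * and cell layout vary per SoC binding and are only illustrative here):
 *
 *   interconnects = <&mmss_noc MASTER_MDP &mc_virt SLAVE_EBI1>;
 *   interconnect-names = "mdp0-mem";
 */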
429 
430 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
431 {
432 	return dpu_crtc_vblank(crtc, true);
433 }
434 
435 static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
436 {
437 	dpu_crtc_vblank(crtc, false);
438 }
439 
440 static void dpu_kms_enable_commit(struct msm_kms *kms)
441 {
442 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
443 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
444 }
445 
446 static void dpu_kms_disable_commit(struct msm_kms *kms)
447 {
448 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
449 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
450 }
451 
452 static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
453 {
454 	struct drm_crtc_state *new_crtc_state;
455 	struct drm_crtc_state *old_crtc_state;
456 	struct drm_crtc *crtc;
457 	int i;
458 
459 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
460 		dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state);
461 
462 	return 0;
463 }
464 
465 static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
466 {
467 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
468 	struct drm_crtc *crtc;
469 
470 	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
471 		if (!crtc->state->active)
472 			continue;
473 
474 		trace_dpu_kms_commit(DRMID(crtc));
475 		dpu_crtc_commit_kickoff(crtc);
476 	}
477 }
478 
479 static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
480 {
481 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
482 	struct drm_crtc *crtc;
483 
484 	DPU_ATRACE_BEGIN("kms_complete_commit");
485 
486 	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
487 		dpu_crtc_complete_commit(crtc);
488 
489 	DPU_ATRACE_END("kms_complete_commit");
490 }
491 
492 static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
493 		struct drm_crtc *crtc)
494 {
495 	struct drm_encoder *encoder;
496 	struct drm_device *dev;
497 	int ret;
498 
499 	if (!kms || !crtc || !crtc->state) {
500 		DPU_ERROR("invalid params\n");
501 		return;
502 	}
503 
504 	dev = crtc->dev;
505 
506 	if (!crtc->state->enable) {
507 		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
508 		return;
509 	}
510 
511 	if (!drm_atomic_crtc_effectively_active(crtc->state)) {
512 		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
513 		return;
514 	}
515 
516 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
517 		if (encoder->crtc != crtc)
518 			continue;
519 		/*
520 		 * Wait for post-flush if necessary to delay before
521 		 * plane_cleanup. For example, wait for vsync in case of video
522 		 * mode panels. This may be a no-op for command mode panels.
523 		 */
524 		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
525 		ret = dpu_encoder_wait_for_commit_done(encoder);
526 		if (ret && ret != -EWOULDBLOCK) {
527 			DPU_ERROR("wait for commit done returned %d\n", ret);
528 			break;
529 		}
530 	}
531 }
532 
533 static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
534 {
535 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
536 	struct drm_crtc *crtc;
537 
538 	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
539 		dpu_kms_wait_for_commit_done(kms, crtc);
540 }
541 
542 static const char *dpu_vsync_sources[] = {
543 	[DPU_VSYNC_SOURCE_GPIO_0] = "mdp_vsync_p",
544 	[DPU_VSYNC_SOURCE_GPIO_1] = "mdp_vsync_s",
545 	[DPU_VSYNC_SOURCE_GPIO_2] = "mdp_vsync_e",
546 	[DPU_VSYNC_SOURCE_INTF_0] = "mdp_intf0",
547 	[DPU_VSYNC_SOURCE_INTF_1] = "mdp_intf1",
548 	[DPU_VSYNC_SOURCE_INTF_2] = "mdp_intf2",
549 	[DPU_VSYNC_SOURCE_INTF_3] = "mdp_intf3",
550 	[DPU_VSYNC_SOURCE_WD_TIMER_0] = "timer0",
551 	[DPU_VSYNC_SOURCE_WD_TIMER_1] = "timer1",
552 	[DPU_VSYNC_SOURCE_WD_TIMER_2] = "timer2",
553 	[DPU_VSYNC_SOURCE_WD_TIMER_3] = "timer3",
554 	[DPU_VSYNC_SOURCE_WD_TIMER_4] = "timer4",
555 };
556 
557 static int dpu_kms_dsi_set_te_source(struct msm_display_info *info,
558 				     struct msm_dsi *dsi)
559 {
560 	const char *te_source = msm_dsi_get_te_source(dsi);
561 	int i;
562 
563 	if (!te_source) {
564 		info->vsync_source = DPU_VSYNC_SOURCE_GPIO_0;
565 		return 0;
566 	}
567 
568 	/* we cannot use match_string() since dpu_vsync_sources is a sparse array */
569 	for (i = 0; i < ARRAY_SIZE(dpu_vsync_sources); i++) {
570 		if (dpu_vsync_sources[i] &&
571 		    !strcmp(dpu_vsync_sources[i], te_source)) {
572 			info->vsync_source = i;
573 			return 0;
574 		}
575 	}
576 
577 	return -EINVAL;
578 }
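
/*
 * The strings in dpu_vsync_sources are matched against the TE source reported
 * by the DSI host. Assuming the "qcom,te-source" device-tree property is the
 * one backing msm_dsi_get_te_source(), a DT sketch would be:
 *
 *   qcom,te-source = "mdp_vsync_e";
 *
 * Without the property, GPIO_0 ("mdp_vsync_p") remains the default above.
 */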
579 
580 static int _dpu_kms_initialize_dsi(struct drm_device *dev,
581 				    struct msm_drm_private *priv,
582 				    struct dpu_kms *dpu_kms)
583 {
584 	struct drm_encoder *encoder = NULL;
585 	struct msm_display_info info;
586 	int i, rc = 0;
587 
588 	if (!(priv->kms->dsi[0] || priv->kms->dsi[1]))
589 		return rc;
590 
591 	/*
592 	 * We support the following configurations:
593 	 * - Single DSI host (dsi0 or dsi1)
594 	 * - Two independent DSI hosts
595 	 * - Bonded DSI0 and DSI1 hosts
596 	 *
597 	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
598 	 */
599 	for (i = 0; i < ARRAY_SIZE(priv->kms->dsi); i++) {
600 		int other = (i + 1) % 2;
601 
602 		if (!priv->kms->dsi[i])
603 			continue;
604 
605 		if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) &&
606 		    !msm_dsi_is_master_dsi(priv->kms->dsi[i]))
607 			continue;
608 
609 		memset(&info, 0, sizeof(info));
610 		info.intf_type = INTF_DSI;
611 
612 		info.h_tile_instance[info.num_of_h_tiles++] = i;
613 		if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]))
614 			info.h_tile_instance[info.num_of_h_tiles++] = other;
615 
616 		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->kms->dsi[i]);
617 
618 		rc = dpu_kms_dsi_set_te_source(&info, priv->kms->dsi[i]);
619 		if (rc) {
620 			DPU_ERROR("failed to identify TE source for dsi display\n");
621 			return rc;
622 		}
623 
624 		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
625 		if (IS_ERR(encoder)) {
626 			DPU_ERROR("encoder init failed for dsi display\n");
627 			return PTR_ERR(encoder);
628 		}
629 
630 		rc = msm_dsi_modeset_init(priv->kms->dsi[i], dev, encoder);
631 		if (rc) {
632 			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
633 				i, rc);
634 			break;
635 		}
636 
637 		if (msm_dsi_is_bonded_dsi(priv->kms->dsi[i]) && priv->kms->dsi[other]) {
638 			rc = msm_dsi_modeset_init(priv->kms->dsi[other], dev, encoder);
639 			if (rc) {
640 				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
641 					other, rc);
642 				break;
643 			}
644 		}
645 	}
646 
647 	return rc;
648 }
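
/*
 * Worked example for the bonded case above: with dsi0 as the master host,
 * info.num_of_h_tiles ends up as 2 and info.h_tile_instance[] = { 0, 1 },
 * so a single encoder drives both DSI hosts.
 */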
649 
650 static int _dpu_kms_initialize_displayport(struct drm_device *dev,
651 					    struct msm_drm_private *priv,
652 					    struct dpu_kms *dpu_kms)
653 {
654 	struct drm_encoder *encoder = NULL;
655 	struct msm_display_info info;
656 	bool yuv_supported;
657 	int rc;
658 	int i;
659 
660 	for (i = 0; i < ARRAY_SIZE(priv->kms->dp); i++) {
661 		if (!priv->kms->dp[i])
662 			continue;
663 
664 		memset(&info, 0, sizeof(info));
665 		info.num_of_h_tiles = 1;
666 		info.h_tile_instance[0] = i;
667 		info.intf_type = INTF_DP;
668 
669 		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
670 		if (IS_ERR(encoder)) {
671 			DPU_ERROR("encoder init failed for DP display\n");
672 			return PTR_ERR(encoder);
673 		}
674 
675 		yuv_supported = !!dpu_kms->catalog->cdm;
676 		rc = msm_dp_modeset_init(priv->kms->dp[i], dev, encoder, yuv_supported);
677 		if (rc) {
678 			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
679 			return rc;
680 		}
681 	}
682 
683 	return 0;
684 }
685 
686 static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
687 				    struct msm_drm_private *priv,
688 				    struct dpu_kms *dpu_kms)
689 {
690 	struct drm_encoder *encoder = NULL;
691 	struct msm_display_info info;
692 	int rc;
693 
694 	if (!priv->kms->hdmi)
695 		return 0;
696 
697 	memset(&info, 0, sizeof(info));
698 	info.num_of_h_tiles = 1;
699 	info.h_tile_instance[0] = 0;
700 	info.intf_type = INTF_HDMI;
701 
702 	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
703 	if (IS_ERR(encoder)) {
704 		DPU_ERROR("encoder init failed for HDMI display\n");
705 		return PTR_ERR(encoder);
706 	}
707 
708 	rc = msm_hdmi_modeset_init(priv->kms->hdmi, dev, encoder);
709 	if (rc) {
710 		DPU_ERROR("modeset_init failed for HDMI, rc = %d\n", rc);
711 		return rc;
712 	}
713 
714 	return 0;
715 }
716 
717 static int _dpu_kms_initialize_writeback(struct drm_device *dev,
718 		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
719 		const u32 *wb_formats, int n_formats)
720 {
721 	struct drm_encoder *encoder = NULL;
722 	struct msm_display_info info;
723 	const enum dpu_wb wb_idx = WB_2;
724 	u32 maxlinewidth;
725 	int rc;
726 
727 	memset(&info, 0, sizeof(info));
728 
729 	info.num_of_h_tiles = 1;
730 	/* use only WB idx 2 instance for DPU */
731 	info.h_tile_instance[0] = wb_idx;
732 	info.intf_type = INTF_WB;
733 
734 	maxlinewidth = dpu_rm_get_wb(&dpu_kms->rm, info.h_tile_instance[0])->caps->maxlinewidth;
735 
736 	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
737 	if (IS_ERR(encoder)) {
738 		DPU_ERROR("encoder init failed for writeback display\n");
739 		return PTR_ERR(encoder);
740 	}
741 
742 	rc = dpu_writeback_init(dev, encoder, wb_formats, n_formats, maxlinewidth);
743 	if (rc) {
744 		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
745 		return rc;
746 	}
747 
748 	return 0;
749 }
750 
751 /**
752  * _dpu_kms_setup_displays - create encoders, bridges and connectors
753  *                           for underlying displays
754  * @dev:        Pointer to drm device structure
755  * @priv:       Pointer to private drm device data
756  * @dpu_kms:    Pointer to dpu kms structure
757  * Returns:     Zero on success
758  */
759 static int _dpu_kms_setup_displays(struct drm_device *dev,
760 				    struct msm_drm_private *priv,
761 				    struct dpu_kms *dpu_kms)
762 {
763 	int rc = 0;
764 	int i;
765 
766 	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
767 	if (rc) {
768 		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
769 		return rc;
770 	}
771 
772 	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
773 	if (rc) {
774 		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
775 		return rc;
776 	}
777 
778 	rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
779 	if (rc) {
780 		DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
781 		return rc;
782 	}
783 
784 	/* Since WB has no separate driver, check the catalog before initializing */
785 	if (dpu_kms->catalog->wb_count) {
786 		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
787 			if (dpu_kms->catalog->wb[i].id == WB_2) {
788 				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
789 						dpu_kms->catalog->wb[i].format_list,
790 						dpu_kms->catalog->wb[i].num_formats);
791 				if (rc) {
792 					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
793 					return rc;
794 				}
795 			}
796 		}
797 	}
798 
799 	return rc;
800 }
801 
802 #define MAX_PLANES 20
803 static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
804 {
805 	struct drm_device *dev;
806 	struct drm_plane *primary_planes[MAX_PLANES], *plane;
807 	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
808 	struct drm_crtc *crtc;
809 	struct drm_encoder *encoder;
810 	unsigned int num_encoders;
811 
812 	struct msm_drm_private *priv;
813 	const struct dpu_mdss_cfg *catalog;
814 
815 	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
816 	int max_crtc_count;
817 	dev = dpu_kms->dev;
818 	priv = dev->dev_private;
819 	catalog = dpu_kms->catalog;
820 
821 	/*
822 	 * Create encoders and query display drivers to create
823 	 * bridges and connectors
824 	 */
825 	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
826 	if (ret)
827 		return ret;
828 
829 	num_encoders = 0;
830 	drm_for_each_encoder(encoder, dev) {
831 		num_encoders++;
832 		if (catalog->cwb_count > 0)
833 			encoder->possible_clones = dpu_encoder_get_clones(encoder);
834 	}
835 
836 	max_crtc_count = min(catalog->mixer_count, num_encoders);
837 
838 	/* Create the planes, keeping track of one primary/cursor per crtc */
839 	for (i = 0; i < catalog->sspp_count; i++) {
840 		enum drm_plane_type type;
841 
842 		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
843 			&& cursor_planes_idx < max_crtc_count)
844 			type = DRM_PLANE_TYPE_CURSOR;
845 		else if (primary_planes_idx < max_crtc_count)
846 			type = DRM_PLANE_TYPE_PRIMARY;
847 		else
848 			type = DRM_PLANE_TYPE_OVERLAY;
849 
850 		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
851 			  type, catalog->sspp[i].features,
852 			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
853 
854 		if (dpu_use_virtual_planes)
855 			plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
856 		else
857 			plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
858 					       (1UL << max_crtc_count) - 1);
859 		if (IS_ERR(plane)) {
860 			DPU_ERROR("dpu_plane_init failed\n");
861 			ret = PTR_ERR(plane);
862 			return ret;
863 		}
864 
865 		if (type == DRM_PLANE_TYPE_CURSOR)
866 			cursor_planes[cursor_planes_idx++] = plane;
867 		else if (type == DRM_PLANE_TYPE_PRIMARY)
868 			primary_planes[primary_planes_idx++] = plane;
869 	}
870 
871 	max_crtc_count = min(max_crtc_count, primary_planes_idx);
872 
873 	/* Create one CRTC per encoder */
874 	for (i = 0; i < max_crtc_count; i++) {
875 		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
876 		if (IS_ERR(crtc)) {
877 			ret = PTR_ERR(crtc);
878 			return ret;
879 		}
880 	}
881 
882 	/* All CRTCs are compatible with all encoders */
883 	drm_for_each_encoder(encoder, dev)
884 		encoder->possible_crtcs = (1 << dev->mode_config.num_crtc) - 1;
885 
886 	return 0;
887 }
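
/*
 * Worked example for the possible_crtcs mask above: with num_crtc == 4,
 * (1 << 4) - 1 == 0xf, i.e. every encoder may be routed to any of the
 * four CRTCs.
 */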
888 
889 static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
890 {
891 	dpu_kms->hw_intr = NULL;
892 
893 	/* safe to call these more than once during shutdown */
894 	_dpu_kms_mmu_destroy(dpu_kms);
895 
896 	dpu_kms->hw_vbif = NULL;
897 
898 	dpu_kms_global_obj_fini(dpu_kms);
899 
900 	dpu_kms->catalog = NULL;
901 
902 	dpu_kms->hw_mdp = NULL;
903 }
904 
905 static void dpu_kms_destroy(struct msm_kms *kms)
906 {
907 	struct dpu_kms *dpu_kms;
908 
909 	if (!kms) {
910 		DPU_ERROR("invalid kms\n");
911 		return;
912 	}
913 
914 	dpu_kms = to_dpu_kms(kms);
915 
916 	_dpu_kms_hw_destroy(dpu_kms);
917 
918 	msm_kms_destroy(&dpu_kms->base);
919 
920 	if (dpu_kms->rpm_enabled)
921 		pm_runtime_disable(&dpu_kms->pdev->dev);
922 }
923 
924 static int dpu_irq_postinstall(struct msm_kms *kms)
925 {
926 	struct msm_drm_private *priv;
927 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
928 
929 	if (!dpu_kms || !dpu_kms->dev)
930 		return -EINVAL;
931 
932 	priv = dpu_kms->dev->dev_private;
933 	if (!priv)
934 		return -EINVAL;
935 
936 	return 0;
937 }
938 
939 static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
940 {
941 	int i;
942 	struct dpu_kms *dpu_kms;
943 	const struct dpu_mdss_cfg *cat;
944 	void __iomem *base;
945 
946 	dpu_kms = to_dpu_kms(kms);
947 
948 	cat = dpu_kms->catalog;
949 
950 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
951 
952 	/* dump CTL sub-blocks HW regs info */
953 	for (i = 0; i < cat->ctl_count; i++)
954 		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
955 				dpu_kms->mmio + cat->ctl[i].base, "%s",
956 				cat->ctl[i].name);
957 
958 	/* dump DSPP sub-blocks HW regs info */
959 	for (i = 0; i < cat->dspp_count; i++) {
960 		base = dpu_kms->mmio + cat->dspp[i].base;
961 		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
962 					    "%s", cat->dspp[i].name);
963 
964 		if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
965 			msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
966 						    base + cat->dspp[i].sblk->pcc.base, "%s_%s",
967 						    cat->dspp[i].name,
968 						    cat->dspp[i].sblk->pcc.name);
969 	}
970 
971 	/* dump INTF sub-blocks HW regs info */
972 	for (i = 0; i < cat->intf_count; i++)
973 		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
974 				dpu_kms->mmio + cat->intf[i].base, "%s",
975 				cat->intf[i].name);
976 
977 	/* dump PP sub-blocks HW regs info */
978 	for (i = 0; i < cat->pingpong_count; i++) {
979 		base = dpu_kms->mmio + cat->pingpong[i].base;
980 		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
981 					    "%s", cat->pingpong[i].name);
982 
983 		/* TE2 sub-block has a length of 0, so it is not printed */
984 
985 		if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
986 			msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
987 						    base + cat->pingpong[i].sblk->dither.base,
988 						    "%s_%s", cat->pingpong[i].name,
989 						    cat->pingpong[i].sblk->dither.name);
990 	}
991 
992 	/* dump SSPP sub-blocks HW regs info */
993 	for (i = 0; i < cat->sspp_count; i++) {
994 		base = dpu_kms->mmio + cat->sspp[i].base;
995 		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
996 					    "%s", cat->sspp[i].name);
997 
998 		if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
999 			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
1000 						    base + cat->sspp[i].sblk->scaler_blk.base,
1001 						    "%s_%s", cat->sspp[i].name,
1002 						    cat->sspp[i].sblk->scaler_blk.name);
1003 
1004 		if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
1005 			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
1006 						    base + cat->sspp[i].sblk->csc_blk.base,
1007 						    "%s_%s", cat->sspp[i].name,
1008 						    cat->sspp[i].sblk->csc_blk.name);
1009 	}
1010 
1011 	/* dump LM sub-blocks HW regs info */
1012 	for (i = 0; i < cat->mixer_count; i++)
1013 		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
1014 				dpu_kms->mmio + cat->mixer[i].base,
1015 				"%s", cat->mixer[i].name);
1016 
1017 	/* dump WB sub-blocks HW regs info */
1018 	for (i = 0; i < cat->wb_count; i++)
1019 		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
1020 				dpu_kms->mmio + cat->wb[i].base, "%s",
1021 				cat->wb[i].name);
1022 
1023 	if (dpu_kms->catalog->mdss_ver->core_major_ver >= 8) {
1024 		msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
1025 				dpu_kms->mmio + cat->mdp[0].base, "top");
1026 		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
1027 				dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
1028 	} else {
1029 		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
1030 				dpu_kms->mmio + cat->mdp[0].base, "top");
1031 	}
1032 
1033 	/* dump CWB sub-blocks HW regs info */
1034 	for (i = 0; i < cat->cwb_count; i++)
1035 		msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
1036 					    dpu_kms->mmio + cat->cwb[i].base, "%s", cat->cwb[i].name);
1037 
1038 	/* dump DSC sub-blocks HW regs info */
1039 	for (i = 0; i < cat->dsc_count; i++) {
1040 		base = dpu_kms->mmio + cat->dsc[i].base;
1041 		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
1042 					    "%s", cat->dsc[i].name);
1043 
1044 		if (cat->mdss_ver->core_major_ver >= 7) {
1045 			struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
1046 			struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
1047 
1048 			msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
1049 						    cat->dsc[i].name, enc.name);
1050 			msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
1051 						    cat->dsc[i].name, ctl.name);
1052 		}
1053 	}
1054 
1055 	if (cat->cdm)
1056 		msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
1057 					    dpu_kms->mmio + cat->cdm->base,
1058 					    "%s", cat->cdm->name);
1059 
1060 	const struct dpu_vbif_cfg *vbif = dpu_kms->catalog->vbif;
1061 
1062 	msm_disp_snapshot_add_block(disp_state, vbif->len,
1063 				    dpu_kms->vbif,
1064 				    "vbif");
1065 
1066 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1067 }
1068 
1069 static const struct msm_kms_funcs kms_funcs = {
1070 	.hw_init         = dpu_kms_hw_init,
1071 	.irq_preinstall  = dpu_core_irq_preinstall,
1072 	.irq_postinstall = dpu_irq_postinstall,
1073 	.irq_uninstall   = dpu_core_irq_uninstall,
1074 	.irq             = dpu_core_irq,
1075 	.enable_commit   = dpu_kms_enable_commit,
1076 	.disable_commit  = dpu_kms_disable_commit,
1077 	.check_mode_changed = dpu_kms_check_mode_changed,
1078 	.flush_commit    = dpu_kms_flush_commit,
1079 	.wait_flush      = dpu_kms_wait_flush,
1080 	.complete_commit = dpu_kms_complete_commit,
1081 	.enable_vblank   = dpu_kms_enable_vblank,
1082 	.disable_vblank  = dpu_kms_disable_vblank,
1083 	.destroy         = dpu_kms_destroy,
1084 	.snapshot        = dpu_kms_mdp_snapshot,
1085 #ifdef CONFIG_DEBUG_FS
1086 	.debugfs_init    = dpu_kms_debugfs_init,
1087 #endif
1088 };
1089 
1090 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
1091 {
1092 	struct msm_mmu *mmu;
1093 
1094 	if (!dpu_kms->base.vm)
1095 		return;
1096 
1097 	mmu = to_msm_vm(dpu_kms->base.vm)->mmu;
1098 
1099 	mmu->funcs->detach(mmu);
1100 	drm_gpuvm_put(dpu_kms->base.vm);
1101 
1102 	dpu_kms->base.vm = NULL;
1103 }
1104 
1105 static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
1106 {
1107 	struct drm_gpuvm *vm;
1108 
1109 	vm = msm_kms_init_vm(dpu_kms->dev, dpu_kms->dev->dev->parent);
1110 	if (IS_ERR(vm))
1111 		return PTR_ERR(vm);
1112 
1113 	dpu_kms->base.vm = vm;
1114 
1115 	return 0;
1116 }
1117 
1118 /**
1119  * dpu_kms_get_clk_rate() - get the clock rate
1120  * @dpu_kms:  pointer to dpu_kms structure
1121  * @clock_name: clock name to get the rate
1122  *
1123  * Return: current clock rate
1124  */
1125 unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
1126 {
1127 	struct clk *clk;
1128 
1129 	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
1130 	if (!clk)
1131 		return 0;
1132 
1133 	return clk_get_rate(clk);
1134 }
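
/*
 * Usage sketch: dpu_kms_hw_init() below uses this helper to bound the perf
 * module by the "core" clock, e.g.
 *
 *   max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
 */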
1135 
1136 #define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
1137 
1138 static int dpu_kms_hw_init(struct msm_kms *kms)
1139 {
1140 	struct dpu_kms *dpu_kms;
1141 	struct drm_device *dev;
1142 	int rc = -EINVAL;
1143 	unsigned long max_core_clk_rate;
1144 	u32 core_rev;
1145 
1146 	if (!kms) {
1147 		DPU_ERROR("invalid kms\n");
1148 		return rc;
1149 	}
1150 
1151 	dpu_kms = to_dpu_kms(kms);
1152 	dev = dpu_kms->dev;
1153 
1154 	dev->mode_config.cursor_width = 512;
1155 	dev->mode_config.cursor_height = 512;
1156 
1157 	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
1158 				    &dpu_kms_global_state_funcs);
1159 
1160 	atomic_set(&dpu_kms->bandwidth_ref, 0);
1161 
1162 	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
1163 	if (rc < 0)
1164 		goto error;
1165 
1166 	core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
1167 
1168 	pr_info("dpu hardware revision:0x%x\n", core_rev);
1169 
1170 	dpu_kms->catalog = of_device_get_match_data(dev->dev);
1171 	if (!dpu_kms->catalog) {
1172 		DPU_ERROR("device config not known!\n");
1173 		rc = -EINVAL;
1174 		goto err_pm_put;
1175 	}
1176 
1177 	/*
1178 	 * Now we need to read the HW catalog and initialize resources such as
1179 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
1180 	 */
1181 	rc = _dpu_kms_mmu_init(dpu_kms);
1182 	if (rc) {
1183 		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
1184 		goto err_pm_put;
1185 	}
1186 
1187 	dpu_kms->mdss = qcom_ubwc_config_get_data();
1188 	if (IS_ERR(dpu_kms->mdss)) {
1189 		rc = PTR_ERR(dpu_kms->mdss);
1190 		DPU_ERROR("failed to get UBWC config data: %d\n", rc);
1191 		goto err_pm_put;
1192 	}
1193 
1194 	if (!dpu_kms->mdss) {
1195 		rc = -EINVAL;
1196 		DPU_ERROR("NULL MDSS data\n");
1197 		goto err_pm_put;
1198 	}
1199 
1200 	rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
1201 	if (rc) {
1202 		DPU_ERROR("rm init failed: %d\n", rc);
1203 		goto err_pm_put;
1204 	}
1205 
1206 	dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
1207 					     dpu_kms->catalog->mdp,
1208 					     dpu_kms->mmio,
1209 					     dpu_kms->catalog->mdss_ver);
1210 	if (IS_ERR(dpu_kms->hw_mdp)) {
1211 		rc = PTR_ERR(dpu_kms->hw_mdp);
1212 		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
1213 		dpu_kms->hw_mdp = NULL;
1214 		goto err_pm_put;
1215 	}
1216 
1217 	struct dpu_hw_vbif *hw;
1218 	const struct dpu_vbif_cfg *vbif = dpu_kms->catalog->vbif;
1219 
1220 	hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif);
1221 	if (IS_ERR(hw)) {
1222 		rc = PTR_ERR(hw);
1223 		DPU_ERROR("failed to init vbif: %d\n", rc);
1224 		goto err_pm_put;
1225 	}
1226 
1227 	dpu_kms->hw_vbif = hw;
1228 
1229 	/* TODO: use the same max_freq as determined in dpu_kms_init() */
1230 	max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
1231 	if (!max_core_clk_rate) {
1232 		DPU_DEBUG("max core clk rate not determined, using default\n");
1233 		max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
1234 	}
1235 
1236 	rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
1237 	if (rc) {
1238 		DPU_ERROR("failed to init perf %d\n", rc);
1239 		goto err_pm_put;
1240 	}
1241 
1242 	/*
1243 	 * We need to program DP <-> PHY relationship only for SC8180X since it
1244 	 * has fewer DP controllers than DP PHYs.
1245 	 * If any other platform requires the same kind of programming, or if
1246 	 * the INTF <-> DP relationship isn't static anymore, this needs to be
1247 	 * configured through the DT.
1248 	 */
1249 	if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
1250 		dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });
1251 
1252 	dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
1253 	if (IS_ERR(dpu_kms->hw_intr)) {
1254 		rc = PTR_ERR(dpu_kms->hw_intr);
1255 		DPU_ERROR("hw_intr init failed: %d\n", rc);
1256 		dpu_kms->hw_intr = NULL;
1257 		goto err_pm_put;
1258 	}
1259 
1260 	dev->mode_config.min_width = 0;
1261 	dev->mode_config.min_height = 0;
1262 
1263 	dev->mode_config.max_width = DPU_MAX_IMG_WIDTH;
1264 	dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT;
1265 
1266 	dev->max_vblank_count = 0xffffffff;
1267 	/* Disable vblank irqs aggressively for power-saving */
1268 	dev->vblank_disable_immediate = true;
1269 
1270 	/*
1271 	 * _dpu_kms_drm_obj_init should create the DRM related objects
1272 	 * i.e. CRTCs, planes, encoders, connectors and so forth
1273 	 */
1274 	rc = _dpu_kms_drm_obj_init(dpu_kms);
1275 	if (rc) {
1276 		DPU_ERROR("modeset init failed: %d\n", rc);
1277 		goto err_pm_put;
1278 	}
1279 
1280 	dpu_vbif_init_memtypes(dpu_kms);
1281 
1282 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1283 
1284 	return 0;
1285 
1286 err_pm_put:
1287 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1288 error:
1289 	_dpu_kms_hw_destroy(dpu_kms);
1290 
1291 	return rc;
1292 }
1293 
1294 static int dpu_kms_init(struct drm_device *ddev)
1295 {
1296 	struct msm_drm_private *priv = ddev->dev_private;
1297 	struct device *dev = ddev->dev;
1298 	struct platform_device *pdev = to_platform_device(dev);
1299 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
1300 	struct dev_pm_opp *opp;
1301 	int ret = 0;
1302 	unsigned long max_freq = ULONG_MAX;
1303 
1304 	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
1305 	if (!IS_ERR(opp))
1306 		dev_pm_opp_put(opp);
1307 
1308 	dev_pm_opp_set_rate(dev, max_freq);
1309 
1310 	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
1311 	if (ret) {
1312 		DPU_ERROR("failed to init kms, ret=%d\n", ret);
1313 		return ret;
1314 	}
1315 	dpu_kms->dev = ddev;
1316 
1317 	pm_runtime_enable(&pdev->dev);
1318 	dpu_kms->rpm_enabled = true;
1319 
1320 	return 0;
1321 }
1322 
1323 static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
1324 {
1325 	struct platform_device *pdev = dpu_kms->pdev;
1326 	struct platform_device *mdss_dev;
1327 	int ret;
1328 
1329 	if (!dev_is_platform(dpu_kms->pdev->dev.parent))
1330 		return -EINVAL;
1331 
1332 	mdss_dev = to_platform_device(dpu_kms->pdev->dev.parent);
1333 
1334 	dpu_kms->mmio = msm_ioremap(pdev, "mdp_phys");
1335 	if (IS_ERR(dpu_kms->mmio)) {
1336 		ret = PTR_ERR(dpu_kms->mmio);
1337 		DPU_ERROR("mdp register memory map failed: %d\n", ret);
1338 		dpu_kms->mmio = NULL;
1339 		return ret;
1340 	}
1341 	DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
1342 
1343 	dpu_kms->vbif = msm_ioremap_mdss(mdss_dev, dpu_kms->pdev, "vbif_phys");
1344 	if (IS_ERR(dpu_kms->vbif)) {
1345 		ret = PTR_ERR(dpu_kms->vbif);
1346 		DPU_ERROR("vbif register memory map failed: %d\n", ret);
1347 		dpu_kms->vbif = NULL;
1348 		return ret;
1349 	}
1350 
1351 	return 0;
1352 }
1353 
1354 static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
1355 {
1356 	struct platform_device *pdev = dpu_kms->pdev;
1357 	int ret;
1358 
1359 	dpu_kms->mmio = msm_ioremap(pdev, "mdp");
1360 	if (IS_ERR(dpu_kms->mmio)) {
1361 		ret = PTR_ERR(dpu_kms->mmio);
1362 		DPU_ERROR("mdp register memory map failed: %d\n", ret);
1363 		dpu_kms->mmio = NULL;
1364 		return ret;
1365 	}
1366 	DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
1367 
1368 	dpu_kms->vbif = msm_ioremap(pdev, "vbif");
1369 	if (IS_ERR(dpu_kms->vbif)) {
1370 		ret = PTR_ERR(dpu_kms->vbif);
1371 		DPU_ERROR("vbif register memory map failed: %d\n", ret);
1372 		dpu_kms->vbif = NULL;
1373 		return ret;
1374 	}
1375 
1376 	return 0;
1377 }
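
/*
 * The "mdp" and "vbif" names above are platform resource names; a device-tree
 * sketch (register addresses/sizes are purely illustrative):
 *
 *   reg = <0x0ae01000 0x8f000>, <0x0aeb0000 0x2008>;
 *   reg-names = "mdp", "vbif";
 */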
1378 
1379 static int dpu_dev_probe(struct platform_device *pdev)
1380 {
1381 	struct device *dev = &pdev->dev;
1382 	struct dpu_kms *dpu_kms;
1383 	int irq;
1384 	int ret = 0;
1385 
1386 	if (!msm_disp_drv_should_bind(&pdev->dev, true))
1387 		return -ENODEV;
1388 
1389 	dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
1390 	if (!dpu_kms)
1391 		return -ENOMEM;
1392 
1393 	dpu_kms->pdev = pdev;
1394 
1395 	ret = devm_pm_opp_set_clkname(dev, "core");
1396 	if (ret)
1397 		return ret;
1398 	/* OPP table is optional */
1399 	ret = devm_pm_opp_of_add_table(dev);
1400 	if (ret && ret != -ENODEV)
1401 		return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");
1402 
1403 	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
1404 	if (ret < 0)
1405 		return dev_err_probe(dev, ret, "failed to parse clocks\n");
1406 
1407 	dpu_kms->num_clocks = ret;
1408 
1409 	irq = platform_get_irq(pdev, 0);
1410 	if (irq < 0)
1411 		return dev_err_probe(dev, irq, "failed to get irq\n");
1412 
1413 	dpu_kms->base.irq = irq;
1414 
1415 	if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,mdp5"))
1416 		ret = dpu_kms_mmap_mdp5(dpu_kms);
1417 	else
1418 		ret = dpu_kms_mmap_dpu(dpu_kms);
1419 	if (ret)
1420 		return ret;
1421 
1422 	ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
1423 	if (ret)
1424 		return ret;
1425 
1426 	return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
1427 }
1428 
1429 static void dpu_dev_remove(struct platform_device *pdev)
1430 {
1431 	component_master_del(&pdev->dev, &msm_drm_ops);
1432 }
1433 
1434 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
1435 {
1436 	int i;
1437 	struct platform_device *pdev = to_platform_device(dev);
1438 	struct msm_drm_private *priv = platform_get_drvdata(pdev);
1439 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
1440 
1441 	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
1442 
1443 	for (i = 0; i < dpu_kms->num_paths; i++)
1444 		icc_set_bw(dpu_kms->path[i], 0, 0);
1445 
1446 	return 0;
1447 }
1448 
1449 static int __maybe_unused dpu_runtime_resume(struct device *dev)
1450 {
1451 	int rc = -1;
1452 	struct platform_device *pdev = to_platform_device(dev);
1453 	struct msm_drm_private *priv = platform_get_drvdata(pdev);
1454 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
1455 	struct drm_encoder *encoder;
1456 	struct drm_device *ddev;
1457 
1458 	ddev = dpu_kms->dev;
1459 
1460 	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
1461 	if (rc) {
1462 		DPU_ERROR("clock enable failed rc:%d\n", rc);
1463 		return rc;
1464 	}
1465 
1466 	dpu_vbif_init_memtypes(dpu_kms);
1467 
1468 	drm_for_each_encoder(encoder, ddev)
1469 		dpu_encoder_virt_runtime_resume(encoder);
1470 
1471 	return rc;
1472 }
1473 
1474 static const struct dev_pm_ops dpu_pm_ops = {
1475 	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
1476 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1477 				pm_runtime_force_resume)
1478 	.prepare = msm_kms_pm_prepare,
1479 	.complete = msm_kms_pm_complete,
1480 };
1481 
1482 static const struct of_device_id dpu_dt_match[] = {
1483 	{ .compatible = "qcom,eliza-dpu", .data = &dpu_eliza_cfg, },
1484 	{ .compatible = "qcom,glymur-dpu", .data = &dpu_glymur_cfg, },
1485 	{ .compatible = "qcom,kaanapali-dpu", .data = &dpu_kaanapali_cfg, },
1486 	{ .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, },
1487 	{ .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, },
1488 	{ .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, },
1489 	{ .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, },
1490 	{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
1491 	{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
1492 	{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
1493 	{ .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
1494 	{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
1495 	{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
1496 	{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
1497 	{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
1498 	{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
1499 	{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
1500 	{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
1501 	{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
1502 	{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
1503 	{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
1504 	{ .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
1505 	{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
1506 	{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
1507 	{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
1508 	{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
1509 	{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
1510 	{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
1511 	{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
1512 	{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
1513 	{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
1514 	{ .compatible = "qcom,sm8750-dpu", .data = &dpu_sm8750_cfg, },
1515 	{ .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
1516 	{}
1517 };
1518 MODULE_DEVICE_TABLE(of, dpu_dt_match);
1519 
1520 static struct platform_driver dpu_driver = {
1521 	.probe = dpu_dev_probe,
1522 	.remove = dpu_dev_remove,
1523 	.shutdown = msm_kms_shutdown,
1524 	.driver = {
1525 		.name = "msm_dpu",
1526 		.of_match_table = dpu_dt_match,
1527 		.pm = &dpu_pm_ops,
1528 	},
1529 };
1530 
1531 void __init msm_dpu_register(void)
1532 {
1533 	platform_driver_register(&dpu_driver);
1534 }
1535 
1536 void __exit msm_dpu_unregister(void)
1537 {
1538 	platform_driver_unregister(&dpu_driver);
1539 }
1540