/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_DRV_H__
#define __MSM_DRV_H__

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sizes.h>
#include <linux/kthread.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>

extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;

struct drm_fb_helper;
struct drm_fb_helper_surface_size;

struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_disp_state;

#define MAX_CRTCS      8

#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
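
/*
 * For example, FRAC_16_16(3, 2) == 0x00018000, ie. 1.5 in 16.16 fixed
 * point.  Note that the multiply happens before the shift, so with 32-bit
 * arithmetic 'mult' must fit in 16 bits.
 */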

enum msm_dp_controller {
	MSM_DP_CONTROLLER_0,
	MSM_DP_CONTROLLER_1,
	MSM_DP_CONTROLLER_2,
	MSM_DP_CONTROLLER_3,
	MSM_DP_CONTROLLER_COUNT,
};

enum msm_dsi_controller {
	MSM_DSI_CONTROLLER_0,
	MSM_DSI_CONTROLLER_1,
	MSM_DSI_CONTROLLER_COUNT,
};

#define MSM_GPU_MAX_RINGS 4

struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;
	int (*kms_init)(struct drm_device *dev);

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* when we have more than one 'msm_gpu' this will need to be an array: */
	struct msm_gpu *gpu;

	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;
	bool has_cached_coherent;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/**
	 * total_mem: Total/global amount of memory backing GEM objects.
	 */
	atomic64_t total_mem;

	/**
	 * List of all GEM objects (mainly for debugfs).  Protected by
	 * obj_lock, which must be acquired before any per GEM object lock.
	 */
	struct list_head objects;
	struct mutex obj_lock;

	/**
	 * lru:
	 *
	 * The various LRUs that a GEM object is in at various stages of
	 * its lifetime.  Objects start out in the unbacked LRU.  When
	 * pinned (for scanout or permanently mapped GPU buffers, like
	 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU.  When
	 * unpinned, it moves into the willneed or dontneed LRU depending
	 * on madvise state.  When backing pages are evicted (willneed) or
	 * purged (dontneed) it moves back into the unbacked LRU.
	 *
	 * The dontneed LRU is considered by the shrinker for objects
	 * that are candidates for purging, and the willneed LRU is
	 * considered for objects that could be evicted.
	 */
	struct {
		/**
		 * unbacked:
		 *
		 * The LRU for GEM objects without backing pages allocated.
		 * This mostly exists so that objects are always in one
		 * LRU.
		 */
		struct drm_gem_lru unbacked;

		/**
		 * pinned:
		 *
		 * The LRU for pinned GEM objects
		 */
		struct drm_gem_lru pinned;

		/**
		 * willneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * WILLNEED state (ie. can be evicted)
		 */
		struct drm_gem_lru willneed;

		/**
		 * dontneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * DONTNEED state (ie. can be purged)
		 */
		struct drm_gem_lru dontneed;

		/**
		 * lock:
		 *
		 * Protects manipulation of all of the LRUs.
		 */
		struct mutex lock;
	} lru;
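
	/*
	 * Illustrative flow between the LRUs described above (a sketch of
	 * the lifecycle, not additional state):
	 *
	 *	unbacked --pin--> pinned --unpin--> willneed/dontneed
	 *	    ^                                     |
	 *	    +----- evict (willneed) / purge ------+
	 */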

	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/**
	 * hangcheck_period: For hang detection, in ms
	 *
	 * Note that in practice, a submit/job will get at least two hangcheck
	 * periods, due to checking for progress being implemented as simply
	 * "have the CP position registers changed since last time?"
	 */
	unsigned int hangcheck_period;

	/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
	struct devfreq_simple_ondemand_data gpu_devfreq_config;

	/**
	 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
	 */
	bool gpu_clamp_to_idle;

	/**
	 * disable_err_irq:
	 *
	 * Disable handling of GPU hw error interrupts, to force fallback to
	 * sw hangcheck timer.  Written (via debugfs) by igt tests to test
	 * the sw hangcheck mechanism.
	 */
	bool disable_err_irq;

	/**
	 * @fault_stall_lock:
	 *
	 * Serialize changes to stall-on-fault state.
	 */
	spinlock_t fault_stall_lock;

	/**
	 * @stall_reenable_time:
	 *
	 * If stall_enabled is false, when to reenable stall-on-fault.
	 * Protected by @fault_stall_lock.
	 */
	ktime_t stall_reenable_time;

	/**
	 * @stall_enabled:
	 *
	 * Whether stall-on-fault is currently enabled. Protected by
	 * @fault_stall_lock.
	 */
	bool stall_enabled;
};

const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);

struct msm_pending_timer;

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx);
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);

int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);

struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
			 struct drm_file *file);
int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
		      struct drm_file *file);

#ifdef CONFIG_DEBUG_FS
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
#endif

int msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sg);
struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);

int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, const struct drm_format_info *info,
		const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
		int w, int h, int p, uint32_t format);

#ifdef CONFIG_DRM_MSM_KMS_FBDEV
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
				 struct drm_fb_helper_surface_size *sizes);
#define MSM_FBDEV_DRIVER_OPS \
	.fbdev_probe = msm_fbdev_driver_fbdev_probe
#else
#define MSM_FBDEV_DRIVER_OPS \
	.fbdev_probe = NULL
#endif
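
/*
 * MSM_FBDEV_DRIVER_OPS is meant to be expanded inside a struct drm_driver
 * initializer, roughly (a sketch):
 *
 *	static const struct drm_driver msm_driver = {
 *		...
 *		MSM_FBDEV_DRIVER_OPS,
 *	};
 */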

struct hdmi;
#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
#else
static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void __init msm_hdmi_register(void) {}
static inline void __exit msm_hdmi_unregister(void) {}
#endif

struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
int dsi_dev_attach(struct platform_device *pdev);
void dsi_dev_detach(struct platform_device *pdev);
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
	return false;
}

static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
	return NULL;
}

static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
	return NULL;
}
#endif

struct msm_dp;
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
			 struct drm_encoder *encoder, bool yuv_supported);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
			       const struct drm_display_mode *mode);
bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
			       const struct drm_display_mode *mode);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);

#else
static inline int __init msm_dp_register(void)
{
	return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
				       struct drm_device *dev,
				       struct drm_encoder *encoder,
				       bool yuv_supported)
{
	return -EINVAL;
}

static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}

static inline bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
	return false;
}

#endif

#ifdef CONFIG_DRM_MSM_MDP4
void msm_mdp4_register(void);
void msm_mdp4_unregister(void);
#else
static inline void msm_mdp4_register(void) {}
static inline void msm_mdp4_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDP5
void msm_mdp_register(void);
void msm_mdp_unregister(void);
#else
static inline void msm_mdp_register(void) {}
static inline void msm_mdp_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_DPU
void msm_dpu_register(void);
void msm_dpu_unregister(void);
#else
static inline void msm_dpu_register(void) {}
static inline void msm_dpu_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDSS
void msm_mdss_register(void);
void msm_mdss_unregister(void);
#else
static inline void msm_mdss_register(void) {}
static inline void msm_mdss_unregister(void) {}
#endif

#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
			struct msm_gem_submit *submit,
			const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif

struct clk *msm_clk_get(struct platform_device *pdev, const char *name);

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
	const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
		phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
			       struct platform_device *dev,
			       const char *name);

struct icc_path *msm_icc_get(struct device *dev, const char *name);

static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
	u32 val = readl(addr);

	val &= ~mask;
	writel(val | or, addr);
}
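
/*
 * Example read/modify/write of a register field (a sketch; REG_CTL and
 * CTL_MODE are hypothetical names in the style of the generated headers):
 *
 *	msm_rmw(base + REG_CTL, CTL_MODE__MASK,
 *		(2 << CTL_MODE__SHIFT) & CTL_MODE__MASK);
 */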

/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work:  the kthread work
 * @worker: the kthread worker the work will be scheduled on
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};

void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
			    ktime_t wakeup_time,
			    enum hrtimer_mode mode);
void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
			   struct kthread_worker *worker,
			   kthread_work_func_t fn,
			   clockid_t clock_id,
			   enum hrtimer_mode mode);
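
/*
 * Typical usage (a sketch; 'priv->worker' and 'retire_work_fn' are
 * hypothetical):
 *
 *	msm_hrtimer_work_init(&priv->retire_work, priv->worker,
 *			      retire_work_fn, CLOCK_MONOTONIC,
 *			      HRTIMER_MODE_ABS);
 *	...
 *	msm_hrtimer_queue_work(&priv->retire_work,
 *			       ktime_add_ms(ktime_get(), 10),
 *			       HRTIMER_MODE_ABS);
 */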

/* Helper for returning a UABI error with optional logging which can make
 * it easier for userspace to understand what it is doing wrong.
 */
#define UERR(err, drm, fmt, ...) \
	({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
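
/*
 * For example (the flags check is illustrative):
 *
 *	if (args->flags & ~MSM_SUBMIT_FLAGS)
 *		return UERR(EINVAL, dev, "invalid flags: %08x", args->flags);
 *
 * which logs at driver debug level and evaluates to -EINVAL.
 */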

#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)

static inline int align_pitch(int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;
	/* adreno needs pitch aligned to 32 pixels: */
	return bytespp * ALIGN(width, 32);
}
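
/*
 * E.g. align_pitch(100, 32): bytespp = 4 and ALIGN(100, 32) = 128, so the
 * returned pitch is 512 bytes.
 */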

/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x)                ({BUG(); 0;})
#define _mesa_float_to_half(x) ({BUG(); 0;})


#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
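
/*
 * For example, with generated register definitions (the names here are
 * hypothetical):
 *
 *	mode = FIELD(val, CTL_MODE);
 *	ctl = COND(enabled, CTL_ENABLE) | COND(flip, CTL_FLIP);
 */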

static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
	ktime_t now = ktime_get();

	if (ktime_compare(*timeout, now) <= 0)
		return 0;

	ktime_t rem = ktime_sub(*timeout, now);
	s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
	return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
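
/*
 * E.g. to wait up to 10ms against an absolute deadline (a sketch; 'wq' and
 * 'cond' are placeholders):
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 10);
 *	...
 *	ret = wait_event_timeout(wq, cond, timeout_to_jiffies(&timeout));
 */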

/* Driver helpers */

extern const struct component_master_ops msm_drm_ops;

int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);

int msm_gpu_probe(struct platform_device *pdev,
		  const struct component_ops *ops);
void msm_gpu_remove(struct platform_device *pdev,
		    const struct component_ops *ops);
int msm_drv_probe(struct device *dev,
	int (*kms_init)(struct drm_device *dev),
	struct msm_kms *kms);
void msm_kms_shutdown(struct platform_device *pdev);

bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);

bool msm_gpu_no_components(void);

#endif /* __MSM_DRV_H__ */