xref: /linux/drivers/gpu/drm/msm/msm_drv.h (revision 504f9bdd3a1588604b0452bfe927ff86e5f6e6df)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7 
8 #ifndef __MSM_DRV_H__
9 #define __MSM_DRV_H__
10 
11 #include <linux/kernel.h>
12 #include <linux/clk.h>
13 #include <linux/cpufreq.h>
14 #include <linux/devfreq.h>
15 #include <linux/module.h>
16 #include <linux/component.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/slab.h>
21 #include <linux/list.h>
22 #include <linux/iommu.h>
23 #include <linux/types.h>
24 #include <linux/of_graph.h>
25 #include <linux/of_device.h>
26 #include <linux/sizes.h>
27 #include <linux/kthread.h>
28 
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_print.h>
32 #include <drm/drm_probe_helper.h>
33 #include <drm/display/drm_dsc.h>
34 #include <drm/msm_drm.h>
35 #include <drm/drm_gem.h>
36 
37 extern struct fault_attr fail_gem_alloc;
38 extern struct fault_attr fail_gem_iova;
39 
40 struct drm_fb_helper;
41 struct drm_fb_helper_surface_size;
42 
43 struct msm_kms;
44 struct msm_gpu;
45 struct msm_mmu;
46 struct msm_mdss;
47 struct msm_rd_state;
48 struct msm_perf_state;
49 struct msm_gem_submit;
50 struct msm_fence_context;
51 struct msm_disp_state;
52 
53 #define MAX_CRTCS      8
54 
55 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
56 
/*
 * DisplayPort controller instance identifiers.  MSM_DP_CONTROLLER_COUNT is
 * one past the last valid instance, i.e. the number of possible controllers.
 */
enum msm_dp_controller {
	MSM_DP_CONTROLLER_0,
	MSM_DP_CONTROLLER_1,
	MSM_DP_CONTROLLER_2,
	MSM_DP_CONTROLLER_3,
	MSM_DP_CONTROLLER_COUNT,
};
64 
/*
 * DSI controller instance identifiers.  MSM_DSI_CONTROLLER_COUNT is one past
 * the last valid instance, i.e. the number of possible controllers.
 */
enum msm_dsi_controller {
	MSM_DSI_CONTROLLER_0,
	MSM_DSI_CONTROLLER_1,
	MSM_DSI_CONTROLLER_COUNT,
};
70 
71 #define MSM_GPU_MAX_RINGS 4
72 
/**
 * struct msm_drm_private - driver-private state attached to a msm drm_device
 */
struct msm_drm_private {

	/* the drm_device this private state belongs to */
	struct drm_device *dev;

	/* KMS (display) state, and the KMS init hook passed to msm_drv_probe() */
	struct msm_kms *kms;
	int (*kms_init)(struct drm_device *dev);

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;

	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;
	bool has_cached_coherent;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/**
	 * total_mem: Total/global amount of memory backing GEM objects.
	 */
	atomic64_t total_mem;

	/**
	 * List of all GEM objects (mainly for debugfs), protected by
	 * obj_lock (acquire before per GEM object lock).
	 */
	struct list_head objects;
	struct mutex obj_lock;

	/**
	 * lru:
	 *
	 * The various LRU's that a GEM object is in at various stages of
	 * its lifetime.  Objects start out in the unbacked LRU.  When
	 * pinned (for scanout or permanently mapped GPU buffers, like
	 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU.  When
	 * unpinned, it moves into willneed or dontneed LRU depending on
	 * madvise state.  When backing pages are evicted (willneed) or
	 * purged (dontneed) it moves back into the unbacked LRU.
	 *
	 * The dontneed LRU is considered by the shrinker for objects
	 * that are candidate for purging, and the willneed LRU is
	 * considered for objects that could be evicted.
	 */
	struct {
		/**
		 * unbacked:
		 *
		 * The LRU for GEM objects without backing pages allocated.
		 * This mostly exists so that objects are always in one
		 * LRU.
		 */
		struct drm_gem_lru unbacked;

		/**
		 * pinned:
		 *
		 * The LRU for pinned GEM objects
		 */
		struct drm_gem_lru pinned;

		/**
		 * willneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * WILLNEED state (ie. can be evicted)
		 */
		struct drm_gem_lru willneed;

		/**
		 * dontneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * DONTNEED state (ie. can be purged)
		 */
		struct drm_gem_lru dontneed;

		/**
		 * lock:
		 *
		 * Protects manipulation of all of the LRUs.
		 */
		struct mutex lock;
	} lru;

	/* hooks for reclaiming GEM memory, see msm_gem_shrinker_init() */
	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/**
	 * hangcheck_period: For hang detection, in ms
	 *
	 * Note that in practice, a submit/job will get at least two hangcheck
	 * periods, due to checking for progress being implemented as simply
	 * "have the CP position registers changed since last time?"
	 */
	unsigned int hangcheck_period;

	/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
	struct devfreq_simple_ondemand_data gpu_devfreq_config;

	/**
	 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
	 */
	bool gpu_clamp_to_idle;

	/**
	 * disable_err_irq:
	 *
	 * Disable handling of GPU hw error interrupts, to force fallback to
	 * sw hangcheck timer.  Written (via debugfs) by igt tests to test
	 * the sw hangcheck mechanism.
	 */
	bool disable_err_irq;

	/**
	 * @fault_stall_lock:
	 *
	 * Serialize changes to stall-on-fault state.
	 */
	spinlock_t fault_stall_lock;

	/**
	 * @stall_reenable_time:
	 *
	 * If stall_enabled is false, when to reenable stall-on-fault.
	 * Protected by @fault_stall_lock.
	 */
	ktime_t stall_reenable_time;

	/**
	 * @stall_enabled:
	 *
	 * Whether stall-on-fault is currently enabled. Protected by
	 * @fault_stall_lock.
	 */
	bool stall_enabled;
};
214 
215 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
216 
217 struct msm_pending_timer;
218 
219 int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
220 		struct msm_kms *kms, int crtc_idx);
221 void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
222 void msm_atomic_commit_tail(struct drm_atomic_state *state);
223 int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
224 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
225 
226 int msm_crtc_enable_vblank(struct drm_crtc *crtc);
227 void msm_crtc_disable_vblank(struct drm_crtc *crtc);
228 
229 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
230 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
231 
232 struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
233 bool msm_use_mmu(struct drm_device *dev);
234 
235 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
236 			 struct drm_file *file);
237 int msm_ioctl_vm_bind(struct drm_device *dev, void *data,
238 		      struct drm_file *file);
239 
240 #ifdef CONFIG_DEBUG_FS
241 unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
242 #endif
243 
244 int msm_gem_shrinker_init(struct drm_device *dev);
245 void msm_gem_shrinker_cleanup(struct drm_device *dev);
246 
247 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
248 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
249 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
250 struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf);
251 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
252 		struct dma_buf_attachment *attach, struct sg_table *sg);
253 struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags);
254 int msm_gem_prime_pin(struct drm_gem_object *obj);
255 void msm_gem_prime_unpin(struct drm_gem_object *obj);
256 
257 int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb);
258 void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb);
259 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane);
260 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
261 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
262 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
263 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
264 struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
265 		int w, int h, int p, uint32_t format);
266 
267 #ifdef CONFIG_DRM_MSM_KMS_FBDEV
268 int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
269 				 struct drm_fb_helper_surface_size *sizes);
270 #define MSM_FBDEV_DRIVER_OPS \
271 	.fbdev_probe = msm_fbdev_driver_fbdev_probe
272 #else
273 #define MSM_FBDEV_DRIVER_OPS \
274 	.fbdev_probe = NULL
275 #endif
276 
277 struct hdmi;
278 #ifdef CONFIG_DRM_MSM_HDMI
279 int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
280 		struct drm_encoder *encoder);
281 void __init msm_hdmi_register(void);
282 void __exit msm_hdmi_unregister(void);
283 #else
/* Stubs used when CONFIG_DRM_MSM_HDMI is disabled. */
static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void __init msm_hdmi_register(void) {}
static inline void __exit msm_hdmi_unregister(void) {}
291 #endif
292 
293 struct msm_dsi;
294 #ifdef CONFIG_DRM_MSM_DSI
295 int dsi_dev_attach(struct platform_device *pdev);
296 void dsi_dev_detach(struct platform_device *pdev);
297 void __init msm_dsi_register(void);
298 void __exit msm_dsi_unregister(void);
299 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
300 			 struct drm_encoder *encoder);
301 void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
302 bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
303 bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
304 bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
305 bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
306 struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
307 const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi);
308 #else
/* Stubs used when CONFIG_DRM_MSM_DSI is disabled. */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
	return false;
}

static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
	return NULL;
}

static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
	return NULL;
}
350 #endif
351 
352 struct msm_dp;
353 #ifdef CONFIG_DRM_MSM_DP
354 int __init msm_dp_register(void);
355 void __exit msm_dp_unregister(void);
356 int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
357 			 struct drm_encoder *encoder, bool yuv_supported);
358 void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
359 bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
360 			       const struct drm_display_mode *mode);
361 bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
362 			       const struct drm_display_mode *mode);
363 bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
364 
365 #else
/* Stubs used when CONFIG_DRM_MSM_DP is disabled. */
static inline int __init msm_dp_register(void)
{
	return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
				       struct drm_device *dev,
				       struct drm_encoder *encoder,
				       bool yuv_supported)
{
	return -EINVAL;
}

static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}

static inline bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
	return false;
}

401 
402 #endif
403 
/*
 * Registration hooks for the per-generation display drivers; each pair
 * compiles to an empty stub when the corresponding CONFIG_DRM_MSM_*
 * option is disabled.
 */
#ifdef CONFIG_DRM_MSM_MDP4
void msm_mdp4_register(void);
void msm_mdp4_unregister(void);
#else
static inline void msm_mdp4_register(void) {}
static inline void msm_mdp4_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDP5
void msm_mdp_register(void);
void msm_mdp_unregister(void);
#else
static inline void msm_mdp_register(void) {}
static inline void msm_mdp_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_DPU
void msm_dpu_register(void);
void msm_dpu_unregister(void);
#else
static inline void msm_dpu_register(void) {}
static inline void msm_dpu_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDSS
void msm_mdss_register(void);
void msm_mdss_unregister(void);
#else
static inline void msm_mdss_register(void) {}
static inline void msm_mdss_unregister(void) {}
#endif
435 
436 #ifdef CONFIG_DEBUG_FS
437 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
438 int msm_debugfs_late_init(struct drm_device *dev);
439 int msm_rd_debugfs_init(struct drm_minor *minor);
440 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
441 __printf(3, 4)
442 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
443 		const char *fmt, ...);
444 int msm_perf_debugfs_init(struct drm_minor *minor);
445 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
446 #else
/* Stubs used when CONFIG_DEBUG_FS is disabled. */
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
			struct msm_gem_submit *submit,
			const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
453 static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
454 #endif
455 
456 struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
457 
458 struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
459 	const char *name);
460 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
461 void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
462 		phys_addr_t *size);
463 void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
464 void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
465 			       struct platform_device *dev,
466 			       const char *name);
467 
468 struct icc_path *msm_icc_get(struct device *dev, const char *name);
469 
470 static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
471 {
472 	u32 val = readl(addr);
473 
474 	val &= ~mask;
475 	writel(val | or, addr);
476 }
477 
/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work:  the kthread work
 * @worker: the kthread worker the work will be scheduled on
 *
 * Initialize with msm_hrtimer_work_init() and arm with
 * msm_hrtimer_queue_work() (declared below).
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};
490 
491 void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
492 			    ktime_t wakeup_time,
493 			    enum hrtimer_mode mode);
494 void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
495 			   struct kthread_worker *worker,
496 			   kthread_work_func_t fn,
497 			   clockid_t clock_id,
498 			   enum hrtimer_mode mode);
499 
500 /* Helper for returning a UABI error with optional logging which can make
501  * it easier for userspace to understand what it is doing wrong.
502  */
503 #define UERR(err, drm, fmt, ...) \
504 	({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
505 
/*
 * Driver debug logging helpers.  VERB() is compiled-out verbose logging:
 * the call is never executed, but the arguments still get type-checked.
 * VERB() is wrapped in do { } while (0) so that it behaves as a single
 * statement; the bare "if (0)" form would leave a dangling if, breaking
 * callers that write "if (cond) VERB(...); else ...".
 */
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) do { if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__); } while (0)
508 
/*
 * Compute the pitch in bytes for @width pixels at @bpp bits per pixel.
 * Adreno needs the pitch aligned to 32 pixels, so @width is rounded up
 * to the next multiple of 32 before multiplying by the byte size of a
 * pixel (bits rounded up to whole bytes).
 */
static inline int align_pitch(int width, int bpp)
{
	int aligned_width = (width + 31) & ~31;

	return ((bpp + 7) >> 3) * aligned_width;
}
515 
516 /* for the generated headers: */
517 #define INVALID_IDX(idx) ({BUG(); 0;})
518 #define fui(x)                ({BUG(); 0;})
519 #define _mesa_float_to_half(x) ({BUG(); 0;})
520 
521 
522 #define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
523 
524 /* for conditionally setting boolean flag(s): */
525 #define COND(bool, val) ((bool) ? (val) : 0)
526 
527 static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
528 {
529 	ktime_t now = ktime_get();
530 
531 	if (ktime_compare(*timeout, now) <= 0)
532 		return 0;
533 
534 	ktime_t rem = ktime_sub(*timeout, now);
535 	s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
536 	return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
537 }
538 
539 /* Driver helpers */
540 
541 extern const struct component_master_ops msm_drm_ops;
542 
543 int msm_kms_pm_prepare(struct device *dev);
544 void msm_kms_pm_complete(struct device *dev);
545 
546 int msm_gpu_probe(struct platform_device *pdev,
547 		  const struct component_ops *ops);
548 void msm_gpu_remove(struct platform_device *pdev,
549 		    const struct component_ops *ops);
550 int msm_drv_probe(struct device *dev,
551 	int (*kms_init)(struct drm_device *dev),
552 	struct msm_kms *kms);
553 void msm_kms_shutdown(struct platform_device *pdev);
554 
555 bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);
556 
557 bool msm_gpu_no_components(void);
558 
559 #endif /* __MSM_DRV_H__ */
560