/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_DRV_H__
#define __MSM_DRV_H__

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sizes.h>
#include <linux/kthread.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>

extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;

struct drm_fb_helper;
struct drm_fb_helper_surface_size;

struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
struct msm_disp_state;

#define MAX_CRTCS 8

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
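
/*
 * Worked example (hypothetical values, for illustration only): FRAC_16_16()
 * expresses a ratio in 16.16 fixed point, so FRAC_16_16(3, 2) evaluates to
 * (3 << 16) / 2 == 0x18000, i.e. 1.5.
 */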

enum msm_dp_controller {
	MSM_DP_CONTROLLER_0,
	MSM_DP_CONTROLLER_1,
	MSM_DP_CONTROLLER_2,
	MSM_DP_CONTROLLER_3,
	MSM_DP_CONTROLLER_COUNT,
};

enum msm_dsi_controller {
	MSM_DSI_CONTROLLER_0,
	MSM_DSI_CONTROLLER_1,
	MSM_DSI_CONTROLLER_COUNT,
};

#define MSM_GPU_MAX_RINGS 4

/* Commit/Event thread specific structure */
struct msm_drm_thread {
	struct drm_device *dev;
	struct kthread_worker *worker;
};

struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;
	int (*kms_init)(struct drm_device *dev);

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5.
	 */
	struct hdmi *hdmi;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[MSM_DSI_CONTROLLER_COUNT];

	struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];

	/* when we have more than one 'msm_gpu' this needs to be an array: */
	struct msm_gpu *gpu;

	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;
	bool has_cached_coherent;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/**
	 * total_mem: Total/global amount of memory backing GEM objects.
	 */
	atomic64_t total_mem;

	/**
	 * List of all GEM objects (mainly for debugfs), protected by
	 * obj_lock (acquire before the per-GEM-object lock).
	 */
	struct list_head objects;
	struct mutex obj_lock;

	/**
	 * lru:
	 *
	 * The various LRUs that a GEM object is in at various stages of
	 * its lifetime.  Objects start out in the unbacked LRU.  When
	 * pinned (for scanout or permanently mapped GPU buffers, like
	 * ringbuffer, memptrs, fw, etc.) they move to the pinned LRU.
	 * When unpinned, they move into the willneed or dontneed LRU
	 * depending on madvise state.  When backing pages are evicted
	 * (willneed) or purged (dontneed) they move back into the
	 * unbacked LRU.
	 *
	 * The dontneed LRU is considered by the shrinker for objects
	 * that are candidates for purging, and the willneed LRU is
	 * considered for objects that could be evicted.
	 */
	struct {
		/**
		 * unbacked:
		 *
		 * The LRU for GEM objects without backing pages allocated.
		 * This mostly exists so that objects are always in one
		 * LRU.
		 */
		struct drm_gem_lru unbacked;

		/**
		 * pinned:
		 *
		 * The LRU for pinned GEM objects.
		 */
		struct drm_gem_lru pinned;

		/**
		 * willneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * WILLNEED state (ie. can be evicted).
		 */
		struct drm_gem_lru willneed;

		/**
		 * dontneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * DONTNEED state (ie. can be purged).
		 */
		struct drm_gem_lru dontneed;

		/**
		 * lock:
		 *
		 * Protects manipulation of all of the LRUs.
		 */
		struct mutex lock;
	} lru;

	struct workqueue_struct *wq;

	unsigned int num_crtcs;

	struct msm_drm_thread event_thread[MAX_CRTCS];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
		spinlock_t lock; /* Protects drm_mm node allocation/removal */
	} vram;

	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/**
	 * hangcheck_period: For hang detection, in ms
	 *
	 * Note that in practice, a submit/job will get at least two hangcheck
	 * periods, due to checking for progress being implemented as simply
	 * "have the CP position registers changed since last time?"
	 */
	unsigned int hangcheck_period;

	/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
	struct devfreq_simple_ondemand_data gpu_devfreq_config;

	/**
	 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
	 */
	bool gpu_clamp_to_idle;

	/**
	 * disable_err_irq:
	 *
	 * Disable handling of GPU hw error interrupts, to force fallback to
	 * the sw hangcheck timer.  Written (via debugfs) by igt tests to
	 * test the sw hangcheck mechanism.
	 */
	bool disable_err_irq;
};
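
/*
 * A minimal usage sketch (hypothetical, not taken from the driver): code
 * holding a drm_device recovers this state via dev_private, and takes
 * lru.lock before touching any of the GEM LRUs:
 *
 *	struct msm_drm_private *priv = ddev->dev_private;
 *
 *	mutex_lock(&priv->lru.lock);
 *	... move objects between the priv->lru.* lists ...
 *	mutex_unlock(&priv->lru.lock);
 */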

const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);

struct msm_pending_timer;

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx);
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);

int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);

struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file);

#ifdef CONFIG_DEBUG_FS
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
#endif

int msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);

int msm_framebuffer_prepare(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace, bool needs_dirtyfb);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace, bool needed_dirtyfb);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
		struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
		int w, int h, int p, uint32_t format);

#ifdef CONFIG_DRM_FBDEV_EMULATION
int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
				 struct drm_fb_helper_surface_size *sizes);
#define MSM_FBDEV_DRIVER_OPS \
	.fbdev_probe = msm_fbdev_driver_fbdev_probe
#else
#define MSM_FBDEV_DRIVER_OPS \
	.fbdev_probe = NULL
#endif
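
/*
 * A hypothetical usage sketch: MSM_FBDEV_DRIVER_OPS expands to a designated
 * initializer, so it is meant to be dropped into a struct drm_driver
 * definition alongside the other ops:
 *
 *	static const struct drm_driver example_driver = {
 *		...
 *		MSM_FBDEV_DRIVER_OPS,
 *	};
 *
 * so that .fbdev_probe is wired up only when fbdev emulation is enabled.
 */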

struct hdmi;
#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
#else
static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void __init msm_hdmi_register(void) {}
static inline void __exit msm_hdmi_unregister(void) {}
#endif

struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
int dsi_dev_attach(struct platform_device *pdev);
void dsi_dev_detach(struct platform_device *pdev);
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
	return false;
}

static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
	return NULL;
}

static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
	return NULL;
}
#endif

#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
			struct drm_encoder *encoder, bool yuv_supported);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
			       const struct drm_display_mode *mode);
bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
			       const struct drm_display_mode *mode);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);

#else
static inline int __init msm_dp_register(void)
{
	return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
				      struct drm_device *dev,
				      struct drm_encoder *encoder,
				      bool yuv_supported)
{
	return -EINVAL;
}

static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}

static inline bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
					     const struct drm_display_mode *mode)
{
	return false;
}

static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
	return false;
}

#endif

#ifdef CONFIG_DRM_MSM_MDP4
void msm_mdp4_register(void);
void msm_mdp4_unregister(void);
#else
static inline void msm_mdp4_register(void) {}
static inline void msm_mdp4_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDP5
void msm_mdp_register(void);
void msm_mdp_unregister(void);
#else
static inline void msm_mdp_register(void) {}
static inline void msm_mdp_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_DPU
void msm_dpu_register(void);
void msm_dpu_unregister(void);
#else
static inline void msm_dpu_register(void) {}
static inline void msm_dpu_unregister(void) {}
#endif

#ifdef CONFIG_DRM_MSM_MDSS
void msm_mdss_register(void);
void msm_mdss_unregister(void);
#else
static inline void msm_mdss_register(void) {}
static inline void msm_mdss_unregister(void) {}
#endif

#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
				      struct msm_gem_submit *submit,
				      const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif

struct clk *msm_clk_get(struct platform_device *pdev, const char *name);

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
				   const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
			       phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap_mdss(struct platform_device *mdss_pdev,
			       struct platform_device *dev,
			       const char *name);

struct icc_path *msm_icc_get(struct device *dev, const char *name);

static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
{
	u32 val = readl(addr);

	val &= ~mask;
	writel(val | or, addr);
}
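
/*
 * For example (hypothetical register and field, for illustration only):
 * updating a 4-bit divider field at bits [7:4] of a control register
 * without disturbing its neighbours:
 *
 *	msm_rmw(base + REG_EXAMPLE_CTL, 0xf0, div << 4);
 *
 * performs the read, clears the masked bits, and writes back the OR'd
 * value as a single read-modify-write.
 */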

/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work: the kthread work
 * @worker: the kthread worker the work will be scheduled on
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};

void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
			    ktime_t wakeup_time,
			    enum hrtimer_mode mode);
void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
			   struct kthread_worker *worker,
			   kthread_work_func_t fn,
			   clockid_t clock_id,
			   enum hrtimer_mode mode);

/* Helper for returning a UABI error with optional logging which can make
 * it easier for userspace to understand what it is doing wrong.
 */
#define UERR(err, drm, fmt, ...) \
	({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })

#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
/* VERB() is compiled out by default; flip the 0 to 1 for verbose logging: */
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)

static inline int align_pitch(int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;
	/* adreno needs pitch aligned to 32 pixels: */
	return bytespp * ALIGN(width, 32);
}

/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
#define _mesa_float_to_half(x) ({BUG(); 0;})

#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)

static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
	ktime_t now = ktime_get();
	s64 remaining_jiffies;

	if (ktime_compare(*timeout, now) < 0) {
		remaining_jiffies = 0;
	} else {
		ktime_t rem = ktime_sub(*timeout, now);

		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
	}

	return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
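
/*
 * Worked example (hypothetical numbers, for illustration): with HZ == 100,
 * one jiffy is 10ms == 10,000,000ns.  A timeout 25ms in the future yields
 * remaining_jiffies == 2, while the clamp() makes an already-expired
 * timeout still wait at least one jiffy instead of returning 0.
 */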

/* Driver helpers */

extern const struct component_master_ops msm_drm_ops;

int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);

int msm_drv_probe(struct device *dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms);
void msm_kms_shutdown(struct platform_device *pdev);

bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver);

#endif /* __MSM_DRV_H__ */