/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* DRM driver descriptions for the two supported hardware generations
 * (VC4 and VC5), defined elsewhere in the driver.
 */
extern const struct drm_driver vc4_drm_driver;
extern const struct drm_driver vc5_drm_driver;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Backpointer to the owning device. */
	struct vc4_dev *dev;

	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 *
	 * Flexible array member: ncounters entries are allocated along with
	 * the struct.
	 */
	u64 counters[];
};
/* Per-device driver state, embedding the base drm_device.  Retrieve
 * it from a drm_device with to_vc4_dev() below.
 */
struct vc4_dev {
	struct drm_device base;
	struct device *dev;

	/* Set when driving the VC5 (BCM2711-class) variant of the
	 * hardware; see vc4_drm_driver/vc5_drm_driver above.
	 */
	bool is_vc5;

	unsigned int irq;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in term of memory or HVS bandwidth which is hard to guess
	 * at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

/* Upcast from the embedded drm_device to the driver struct. */
static inline struct vc4_dev *
to_vc4_dev(const struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}

/* Driver buffer object, embedding the DMA-backed GEM object. */
struct vc4_bo {
	struct drm_gem_dma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};
/* Upcast from a GEM object to the driver BO. */
static inline struct vc4_bo *
to_vc4_bo(const struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}

/* dma_fence implementation backed by the driver's seqno counters. */
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(const struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

/* Callback fired from a workqueue once vc4_dev->finished_seqno passes
 * the recorded seqno (see seqno_cb_list above).
 */
struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

/* State for the V3D (3D engine) sub-device. */
struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

/* State for the HVS (hardware video scaler / display) sub-device. */
struct vc4_hvs {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	unsigned long max_core_rate;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/*
	 * Even if HDMI0 on the RPi4 can output modes requiring a pixel
	 * rate higher than 297MHz, it needs some adjustments in the
	 * config.txt file to be able to do so and thus won't always be
	 * available.
	 */
	bool vc5_hdmi_enable_hdmi_20;

	/*
	 * 4096x2160@60 requires a core overclock to work, so register
	 * whether that is sufficient.
	 */
	bool vc5_hdmi_enable_4096by2160;
};
#define HVS_NUM_CHANNELS 3

/* Global (private-object) atomic state tracking the HVS FIFOs. */
struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static inline struct vc4_hvs_state *
to_vc4_hvs_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

/* Accessors for the HVS global state within an atomic commit. */
struct vc4_hvs_state *vc4_hvs_get_global_state(struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_old_global_state(const struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_new_global_state(const struct drm_atomic_state *state);

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(const struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

/* Scaling filter selection, per plane and per RGB/Y vs Cb/Cr pair. */
enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	/* Index 0 is the RGB/Y plane, index 1 the Cb/Cr planes. */
	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};
static inline struct vc4_plane_state *
to_vc4_plane_state(const struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
	VC4_ENCODER_TYPE_TXP,
};

/* Driver encoder, with optional hooks invoked around CRTC
 * enable/disable sequencing (a NULL hook is simply not called by the
 * CRTC code).
 */
struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(const struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

/* Walk the device's encoders and return the first one of the given
 * type, or NULL if none is registered.
 */
static inline
struct drm_encoder *vc4_find_encoder_by_type(struct drm_device *drm,
					     enum vc4_encoder_type type)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, drm) {
		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);

		if (vc4_encoder->type == type)
			return encoder;
	}

	return NULL;
}

/* Static, per-output description of a CRTC. */
struct vc4_crtc_data {
	const char *name;

	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};
extern const struct vc4_crtc_data vc4_txp_crtc_data;

/* Static description of a pixelvalve-backed CRTC, extending
 * vc4_crtc_data.
 */
struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

extern const struct vc4_pv_data bcm2835_pv0_data;
extern const struct vc4_pv_data bcm2835_pv1_data;
extern const struct vc4_pv_data bcm2835_pv2_data;
extern const struct vc4_pv_data bcm2711_pv0_data;
extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Gamma LUT shadow copies, one entry per color component. */
	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(const struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

/* Only valid for pixelvalve-backed CRTCs, whose data is embedded in a
 * vc4_pv_data.
 */
static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};
/* Sentinel for vc4_crtc_state->assigned_channel: no HVS channel. */
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(const struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

/* Register accessors. Note these rely on a local `vc4` (for V3D) or
 * `hvs` (for HVS) variable being in scope at the call site.
 */
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

/* Build a debugfs_regset32 entry from a register name. */
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
*/ 6627edabee0SEric Anholt uint64_t bin_dep_seqno; 6637edabee0SEric Anholt 664cdec4d36SEric Anholt struct dma_fence *fence; 665cdec4d36SEric Anholt 666c4ce60dcSEric Anholt /* Last current addresses the hardware was processing when the 667c4ce60dcSEric Anholt * hangcheck timer checked on us. 668c4ce60dcSEric Anholt */ 669c4ce60dcSEric Anholt uint32_t last_ct0ca, last_ct1ca; 670c4ce60dcSEric Anholt 671d5b1a78aSEric Anholt /* Kernel-space copy of the ioctl arguments */ 672d5b1a78aSEric Anholt struct drm_vc4_submit_cl *args; 673d5b1a78aSEric Anholt 674d5b1a78aSEric Anholt /* This is the array of BOs that were looked up at the start of exec. 675d5b1a78aSEric Anholt * Command validation will use indices into this array. 676d5b1a78aSEric Anholt */ 6774a83c26aSDanilo Krummrich struct drm_gem_dma_object **bo; 678d5b1a78aSEric Anholt uint32_t bo_count; 679d5b1a78aSEric Anholt 6807edabee0SEric Anholt /* List of BOs that are being written by the RCL. Other than 6817edabee0SEric Anholt * the binner temporary storage, this is all the BOs written 6827edabee0SEric Anholt * by the job. 6837edabee0SEric Anholt */ 6844a83c26aSDanilo Krummrich struct drm_gem_dma_object *rcl_write_bo[4]; 6857edabee0SEric Anholt uint32_t rcl_write_bo_count; 6867edabee0SEric Anholt 687d5b1a78aSEric Anholt /* Pointers for our position in vc4->job_list */ 688d5b1a78aSEric Anholt struct list_head head; 689d5b1a78aSEric Anholt 690d5b1a78aSEric Anholt /* List of other BOs used in the job that need to be released 691d5b1a78aSEric Anholt * once the job is complete. 692d5b1a78aSEric Anholt */ 693d5b1a78aSEric Anholt struct list_head unref_list; 694d5b1a78aSEric Anholt 695d5b1a78aSEric Anholt /* Current unvalidated indices into @bo loaded by the non-hardware 696d5b1a78aSEric Anholt * VC4_PACKET_GEM_HANDLES. 
697d5b1a78aSEric Anholt */ 698d5b1a78aSEric Anholt uint32_t bo_index[2]; 699d5b1a78aSEric Anholt 700d5b1a78aSEric Anholt /* This is the BO where we store the validated command lists, shader 701d5b1a78aSEric Anholt * records, and uniforms. 702d5b1a78aSEric Anholt */ 7034a83c26aSDanilo Krummrich struct drm_gem_dma_object *exec_bo; 704d5b1a78aSEric Anholt 705d5b1a78aSEric Anholt /** 706d5b1a78aSEric Anholt * This tracks the per-shader-record state (packet 64) that 707d5b1a78aSEric Anholt * determines the length of the shader record and the offset 708d5b1a78aSEric Anholt * it's expected to be found at. It gets read in from the 709d5b1a78aSEric Anholt * command lists. 710d5b1a78aSEric Anholt */ 711d5b1a78aSEric Anholt struct vc4_shader_state { 712d5b1a78aSEric Anholt uint32_t addr; 713d5b1a78aSEric Anholt /* Maximum vertex index referenced by any primitive using this 714d5b1a78aSEric Anholt * shader state. 715d5b1a78aSEric Anholt */ 716d5b1a78aSEric Anholt uint32_t max_index; 717d5b1a78aSEric Anholt } *shader_state; 718d5b1a78aSEric Anholt 719d5b1a78aSEric Anholt /** How many shader states the user declared they were using. */ 720d5b1a78aSEric Anholt uint32_t shader_state_size; 721d5b1a78aSEric Anholt /** How many shader state records the validator has seen. */ 722d5b1a78aSEric Anholt uint32_t shader_state_count; 723d5b1a78aSEric Anholt 724d5b1a78aSEric Anholt bool found_tile_binning_mode_config_packet; 725d5b1a78aSEric Anholt bool found_start_tile_binning_packet; 726d5b1a78aSEric Anholt bool found_increment_semaphore_packet; 727d5b1a78aSEric Anholt bool found_flush; 728d5b1a78aSEric Anholt uint8_t bin_tiles_x, bin_tiles_y; 729553c942fSEric Anholt /* Physical address of the start of the tile alloc array 730553c942fSEric Anholt * (where each tile's binned CL will start) 731553c942fSEric Anholt */ 732d5b1a78aSEric Anholt uint32_t tile_alloc_offset; 733553c942fSEric Anholt /* Bitmask of which binner slots are freed when this job completes. 
*/ 734553c942fSEric Anholt uint32_t bin_slots; 735d5b1a78aSEric Anholt 736d5b1a78aSEric Anholt /** 737d5b1a78aSEric Anholt * Computed addresses pointing into exec_bo where we start the 738d5b1a78aSEric Anholt * bin thread (ct0) and render thread (ct1). 739d5b1a78aSEric Anholt */ 740d5b1a78aSEric Anholt uint32_t ct0ca, ct0ea; 741d5b1a78aSEric Anholt uint32_t ct1ca, ct1ea; 742d5b1a78aSEric Anholt 743d5b1a78aSEric Anholt /* Pointer to the unvalidated bin CL (if present). */ 744d5b1a78aSEric Anholt void *bin_u; 745d5b1a78aSEric Anholt 746d5b1a78aSEric Anholt /* Pointers to the shader recs. These paddr gets incremented as CL 747d5b1a78aSEric Anholt * packets are relocated in validate_gl_shader_state, and the vaddrs 748d5b1a78aSEric Anholt * (u and v) get incremented and size decremented as the shader recs 749d5b1a78aSEric Anholt * themselves are validated. 750d5b1a78aSEric Anholt */ 751d5b1a78aSEric Anholt void *shader_rec_u; 752d5b1a78aSEric Anholt void *shader_rec_v; 753d5b1a78aSEric Anholt uint32_t shader_rec_p; 754d5b1a78aSEric Anholt uint32_t shader_rec_size; 755d5b1a78aSEric Anholt 756d5b1a78aSEric Anholt /* Pointers to the uniform data. These pointers are incremented, and 757d5b1a78aSEric Anholt * size decremented, as each batch of uniforms is uploaded. 758d5b1a78aSEric Anholt */ 759d5b1a78aSEric Anholt void *uniforms_u; 760d5b1a78aSEric Anholt void *uniforms_v; 761d5b1a78aSEric Anholt uint32_t uniforms_p; 762d5b1a78aSEric Anholt uint32_t uniforms_size; 76365101d8cSBoris Brezillon 76465101d8cSBoris Brezillon /* Pointer to a performance monitor object if the user requested it, 76565101d8cSBoris Brezillon * NULL otherwise. 76665101d8cSBoris Brezillon */ 76765101d8cSBoris Brezillon struct vc4_perfmon *perfmon; 76835c8b4b2SPaul Kocialkowski 76935c8b4b2SPaul Kocialkowski /* Whether the exec has taken a reference to the binner BO, which should 77035c8b4b2SPaul Kocialkowski * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet. 
77135c8b4b2SPaul Kocialkowski */ 77235c8b4b2SPaul Kocialkowski bool bin_bo_used; 77365101d8cSBoris Brezillon }; 77465101d8cSBoris Brezillon 77565101d8cSBoris Brezillon /* Per-open file private data. Any driver-specific resource that has to be 77665101d8cSBoris Brezillon * released when the DRM file is closed should be placed here. 77765101d8cSBoris Brezillon */ 77865101d8cSBoris Brezillon struct vc4_file { 77930f8c74cSMaxime Ripard struct vc4_dev *dev; 78030f8c74cSMaxime Ripard 78165101d8cSBoris Brezillon struct { 78265101d8cSBoris Brezillon struct idr idr; 78365101d8cSBoris Brezillon struct mutex lock; 78465101d8cSBoris Brezillon } perfmon; 78535c8b4b2SPaul Kocialkowski 78635c8b4b2SPaul Kocialkowski bool bin_bo_used; 787d5b1a78aSEric Anholt }; 788d5b1a78aSEric Anholt 789d5b1a78aSEric Anholt static inline struct vc4_exec_info * 790ca26d28bSVarad Gautam vc4_first_bin_job(struct vc4_dev *vc4) 791d5b1a78aSEric Anholt { 79257b9f569SMasahiro Yamada return list_first_entry_or_null(&vc4->bin_job_list, 79357b9f569SMasahiro Yamada struct vc4_exec_info, head); 794ca26d28bSVarad Gautam } 795ca26d28bSVarad Gautam 796ca26d28bSVarad Gautam static inline struct vc4_exec_info * 797ca26d28bSVarad Gautam vc4_first_render_job(struct vc4_dev *vc4) 798ca26d28bSVarad Gautam { 79957b9f569SMasahiro Yamada return list_first_entry_or_null(&vc4->render_job_list, 800ca26d28bSVarad Gautam struct vc4_exec_info, head); 801d5b1a78aSEric Anholt } 802d5b1a78aSEric Anholt 8039326e6f2SEric Anholt static inline struct vc4_exec_info * 8049326e6f2SEric Anholt vc4_last_render_job(struct vc4_dev *vc4) 8059326e6f2SEric Anholt { 8069326e6f2SEric Anholt if (list_empty(&vc4->render_job_list)) 8079326e6f2SEric Anholt return NULL; 8089326e6f2SEric Anholt return list_last_entry(&vc4->render_job_list, 8099326e6f2SEric Anholt struct vc4_exec_info, head); 8109326e6f2SEric Anholt } 8119326e6f2SEric Anholt 812c8b75bcaSEric Anholt /** 813463873d5SEric Anholt * struct vc4_texture_sample_info - saves the offsets into 
the UBO for texture 814463873d5SEric Anholt * setup parameters. 815463873d5SEric Anholt * 816463873d5SEric Anholt * This will be used at draw time to relocate the reference to the texture 817463873d5SEric Anholt * contents in p0, and validate that the offset combined with 818463873d5SEric Anholt * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO. 819463873d5SEric Anholt * Note that the hardware treats unprovided config parameters as 0, so not all 820463873d5SEric Anholt * of them need to be set up for every texure sample, and we'll store ~0 as 821463873d5SEric Anholt * the offset to mark the unused ones. 822463873d5SEric Anholt * 823463873d5SEric Anholt * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit 824463873d5SEric Anholt * Setup") for definitions of the texture parameters. 825463873d5SEric Anholt */ 826463873d5SEric Anholt struct vc4_texture_sample_info { 827463873d5SEric Anholt bool is_direct; 828463873d5SEric Anholt uint32_t p_offset[4]; 829463873d5SEric Anholt }; 830463873d5SEric Anholt 831463873d5SEric Anholt /** 832463873d5SEric Anholt * struct vc4_validated_shader_info - information about validated shaders that 833463873d5SEric Anholt * needs to be used from command list validation. 834463873d5SEric Anholt * 835463873d5SEric Anholt * For a given shader, each time a shader state record references it, we need 836463873d5SEric Anholt * to verify that the shader doesn't read more uniforms than the shader state 837463873d5SEric Anholt * record's uniform BO pointer can provide, and we need to apply relocations 838463873d5SEric Anholt * and validate the shader state record's uniforms that define the texture 839463873d5SEric Anholt * samples. 
840463873d5SEric Anholt */ 841463873d5SEric Anholt struct vc4_validated_shader_info { 842463873d5SEric Anholt uint32_t uniforms_size; 843463873d5SEric Anholt uint32_t uniforms_src_size; 844463873d5SEric Anholt uint32_t num_texture_samples; 845463873d5SEric Anholt struct vc4_texture_sample_info *texture_samples; 8466d45c81dSEric Anholt 8476d45c81dSEric Anholt uint32_t num_uniform_addr_offsets; 8486d45c81dSEric Anholt uint32_t *uniform_addr_offsets; 849c778cc5dSJonas Pfeil 850c778cc5dSJonas Pfeil bool is_threaded; 851463873d5SEric Anholt }; 852463873d5SEric Anholt 853463873d5SEric Anholt /** 8547f2a09ecSJames Hughes * __wait_for - magic wait macro 855c8b75bcaSEric Anholt * 8567f2a09ecSJames Hughes * Macro to help avoid open coding check/wait/timeout patterns. Note that it's 8577f2a09ecSJames Hughes * important that we check the condition again after having timed out, since the 8587f2a09ecSJames Hughes * timeout could be due to preemption or similar and we've never had a chance to 8597f2a09ecSJames Hughes * check the condition before the timeout. 
860c8b75bcaSEric Anholt */ 8617f2a09ecSJames Hughes #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 8627f2a09ecSJames Hughes const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ 8637f2a09ecSJames Hughes long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 8647f2a09ecSJames Hughes int ret__; \ 8657f2a09ecSJames Hughes might_sleep(); \ 8667f2a09ecSJames Hughes for (;;) { \ 8677f2a09ecSJames Hughes const bool expired__ = ktime_after(ktime_get_raw(), end__); \ 8687f2a09ecSJames Hughes OP; \ 8697f2a09ecSJames Hughes /* Guarantee COND check prior to timeout */ \ 8707f2a09ecSJames Hughes barrier(); \ 8717f2a09ecSJames Hughes if (COND) { \ 8727f2a09ecSJames Hughes ret__ = 0; \ 8737f2a09ecSJames Hughes break; \ 8747f2a09ecSJames Hughes } \ 8757f2a09ecSJames Hughes if (expired__) { \ 876c8b75bcaSEric Anholt ret__ = -ETIMEDOUT; \ 877c8b75bcaSEric Anholt break; \ 878c8b75bcaSEric Anholt } \ 8797f2a09ecSJames Hughes usleep_range(wait__, wait__ * 2); \ 8807f2a09ecSJames Hughes if (wait__ < (Wmax)) \ 8817f2a09ecSJames Hughes wait__ <<= 1; \ 882c8b75bcaSEric Anholt } \ 883c8b75bcaSEric Anholt ret__; \ 884c8b75bcaSEric Anholt }) 885c8b75bcaSEric Anholt 8867f2a09ecSJames Hughes #define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ 8877f2a09ecSJames Hughes (Wmax)) 8887f2a09ecSJames Hughes #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) 889c8b75bcaSEric Anholt 890c8b75bcaSEric Anholt /* vc4_bo.c */ 891c826a6e1SEric Anholt struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); 892c826a6e1SEric Anholt struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, 893f3099462SEric Anholt bool from_cache, enum vc4_kernel_bo_type type); 894dd2dfd44SMaxime Ripard int vc4_bo_dumb_create(struct drm_file *file_priv, 895c8b75bcaSEric Anholt struct drm_device *dev, 896c8b75bcaSEric Anholt struct drm_mode_create_dumb *args); 897d5bc60f6SEric Anholt int vc4_create_bo_ioctl(struct drm_device *dev, 
void *data, 898d5bc60f6SEric Anholt struct drm_file *file_priv); 899463873d5SEric Anholt int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, 900463873d5SEric Anholt struct drm_file *file_priv); 901d5bc60f6SEric Anholt int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, 902d5bc60f6SEric Anholt struct drm_file *file_priv); 90383753117SEric Anholt int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, 90483753117SEric Anholt struct drm_file *file_priv); 90583753117SEric Anholt int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, 90683753117SEric Anholt struct drm_file *file_priv); 90721461365SEric Anholt int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, 90821461365SEric Anholt struct drm_file *file_priv); 909f3099462SEric Anholt int vc4_label_bo_ioctl(struct drm_device *dev, void *data, 910f3099462SEric Anholt struct drm_file *file_priv); 911f3099462SEric Anholt int vc4_bo_cache_init(struct drm_device *dev); 912b9f19259SBoris Brezillon int vc4_bo_inc_usecnt(struct vc4_bo *bo); 913b9f19259SBoris Brezillon void vc4_bo_dec_usecnt(struct vc4_bo *bo); 914b9f19259SBoris Brezillon void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); 915b9f19259SBoris Brezillon void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); 916445b287eSMaxime Ripard int vc4_bo_debugfs_init(struct drm_minor *minor); 917c8b75bcaSEric Anholt 918c8b75bcaSEric Anholt /* vc4_crtc.c */ 919c8b75bcaSEric Anholt extern struct platform_driver vc4_crtc_driver; 920875a4d53SMaxime Ripard int vc4_crtc_disable_at_boot(struct drm_crtc *crtc); 921ee33ac27SMaxime Ripard int __vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev, 922ee33ac27SMaxime Ripard struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data, 923ee33ac27SMaxime Ripard struct drm_plane *primary_plane, 924ee33ac27SMaxime Ripard const struct drm_crtc_funcs *crtc_funcs, 925ee33ac27SMaxime Ripard const struct drm_crtc_helper_funcs *crtc_helper_funcs, 926ee33ac27SMaxime Ripard bool 
feeds_txp); 9273f98076fSMaxime Ripard int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev, 9283f98076fSMaxime Ripard struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data, 9295fefc601SMaxime Ripard const struct drm_crtc_funcs *crtc_funcs, 9303f98076fSMaxime Ripard const struct drm_crtc_helper_funcs *crtc_helper_funcs, 9313f98076fSMaxime Ripard bool feeds_txp); 932bdd96472SMaxime Ripard int vc4_page_flip(struct drm_crtc *crtc, 933bdd96472SMaxime Ripard struct drm_framebuffer *fb, 934bdd96472SMaxime Ripard struct drm_pending_vblank_event *event, 935bdd96472SMaxime Ripard uint32_t flags, 936bdd96472SMaxime Ripard struct drm_modeset_acquire_ctx *ctx); 937*f759f5b5SMaxime Ripard int vc4_crtc_atomic_check(struct drm_crtc *crtc, 938*f759f5b5SMaxime Ripard struct drm_atomic_state *state); 939bdd96472SMaxime Ripard struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc); 940bdd96472SMaxime Ripard void vc4_crtc_destroy_state(struct drm_crtc *crtc, 941bdd96472SMaxime Ripard struct drm_crtc_state *state); 942bdd96472SMaxime Ripard void vc4_crtc_reset(struct drm_crtc *crtc); 943008095e0SBoris Brezillon void vc4_crtc_handle_vblank(struct vc4_crtc *crtc); 94468e4a69aSMaxime Ripard void vc4_crtc_send_vblank(struct drm_crtc *crtc); 945445b287eSMaxime Ripard int vc4_crtc_late_register(struct drm_crtc *crtc); 946666e7358SBoris Brezillon void vc4_crtc_get_margins(struct drm_crtc_state *state, 947e590c2b0SDan Carpenter unsigned int *left, unsigned int *right, 948666e7358SBoris Brezillon unsigned int *top, unsigned int *bottom); 949c8b75bcaSEric Anholt 950c8b75bcaSEric Anholt /* vc4_debugfs.c */ 9517ce84471SWambui Karuga void vc4_debugfs_init(struct drm_minor *minor); 952c9be804cSEric Anholt #ifdef CONFIG_DEBUG_FS 953445b287eSMaxime Ripard int vc4_debugfs_add_file(struct drm_minor *minor, 954c9be804cSEric Anholt const char *filename, 955c9be804cSEric Anholt int (*show)(struct seq_file*, void*), 956c9be804cSEric Anholt void *data); 
957445b287eSMaxime Ripard int vc4_debugfs_add_regset32(struct drm_minor *minor, 958c9be804cSEric Anholt const char *filename, 959c9be804cSEric Anholt struct debugfs_regset32 *regset); 960c9be804cSEric Anholt #else 961445b287eSMaxime Ripard static inline int vc4_debugfs_add_file(struct drm_minor *minor, 962c9be804cSEric Anholt const char *filename, 963c9be804cSEric Anholt int (*show)(struct seq_file*, void*), 964c9be804cSEric Anholt void *data) 965c9be804cSEric Anholt { 966fe3b0f78SMaxime Ripard return 0; 967c9be804cSEric Anholt } 968c9be804cSEric Anholt 969445b287eSMaxime Ripard static inline int vc4_debugfs_add_regset32(struct drm_minor *minor, 970c9be804cSEric Anholt const char *filename, 971c9be804cSEric Anholt struct debugfs_regset32 *regset) 972c9be804cSEric Anholt { 973fe3b0f78SMaxime Ripard return 0; 974c9be804cSEric Anholt } 975c9be804cSEric Anholt #endif 976c8b75bcaSEric Anholt 977c8b75bcaSEric Anholt /* vc4_drv.c */ 978c8b75bcaSEric Anholt void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); 9793d763742SMaxime Ripard int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args); 980c8b75bcaSEric Anholt 98108302c35SEric Anholt /* vc4_dpi.c */ 98208302c35SEric Anholt extern struct platform_driver vc4_dpi_driver; 98308302c35SEric Anholt 9844078f575SEric Anholt /* vc4_dsi.c */ 9854078f575SEric Anholt extern struct platform_driver vc4_dsi_driver; 9864078f575SEric Anholt 987cdec4d36SEric Anholt /* vc4_fence.c */ 988cdec4d36SEric Anholt extern const struct dma_fence_ops vc4_fence_ops; 989cdec4d36SEric Anholt 990d5b1a78aSEric Anholt /* vc4_gem.c */ 991171a072bSMaxime Ripard int vc4_gem_init(struct drm_device *dev); 992d5b1a78aSEric Anholt int vc4_submit_cl_ioctl(struct drm_device *dev, void *data, 993d5b1a78aSEric Anholt struct drm_file *file_priv); 994d5b1a78aSEric Anholt int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, 995d5b1a78aSEric Anholt struct drm_file *file_priv); 996d5b1a78aSEric Anholt int vc4_wait_bo_ioctl(struct drm_device 
*dev, void *data, 997d5b1a78aSEric Anholt struct drm_file *file_priv); 998ca26d28bSVarad Gautam void vc4_submit_next_bin_job(struct drm_device *dev); 999ca26d28bSVarad Gautam void vc4_submit_next_render_job(struct drm_device *dev); 1000ca26d28bSVarad Gautam void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec); 1001d5b1a78aSEric Anholt int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, 1002d5b1a78aSEric Anholt uint64_t timeout_ns, bool interruptible); 1003d5b1a78aSEric Anholt void vc4_job_handle_completed(struct vc4_dev *vc4); 1004b501baccSEric Anholt int vc4_queue_seqno_cb(struct drm_device *dev, 1005b501baccSEric Anholt struct vc4_seqno_cb *cb, uint64_t seqno, 1006b501baccSEric Anholt void (*func)(struct vc4_seqno_cb *cb)); 1007b9f19259SBoris Brezillon int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, 1008b9f19259SBoris Brezillon struct drm_file *file_priv); 1009d5b1a78aSEric Anholt 1010c8b75bcaSEric Anholt /* vc4_hdmi.c */ 1011c8b75bcaSEric Anholt extern struct platform_driver vc4_hdmi_driver; 1012c8b75bcaSEric Anholt 10139a8d5e4aSBoris Brezillon /* vc4_vec.c */ 1014e4b81f8cSBoris Brezillon extern struct platform_driver vc4_vec_driver; 1015e4b81f8cSBoris Brezillon 1016008095e0SBoris Brezillon /* vc4_txp.c */ 1017008095e0SBoris Brezillon extern struct platform_driver vc4_txp_driver; 1018008095e0SBoris Brezillon 1019d5b1a78aSEric Anholt /* vc4_irq.c */ 10205226711eSThomas Zimmermann void vc4_irq_enable(struct drm_device *dev); 10215226711eSThomas Zimmermann void vc4_irq_disable(struct drm_device *dev); 10225226711eSThomas Zimmermann int vc4_irq_install(struct drm_device *dev, int irq); 1023d5b1a78aSEric Anholt void vc4_irq_uninstall(struct drm_device *dev); 1024d5b1a78aSEric Anholt void vc4_irq_reset(struct drm_device *dev); 1025d5b1a78aSEric Anholt 1026c8b75bcaSEric Anholt /* vc4_hvs.c */ 1027c8b75bcaSEric Anholt extern struct platform_driver vc4_hvs_driver; 1028640dbcc9SMaxime Ripard struct vc4_hvs 
*__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev); 10293454f01aSMaxime Ripard void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output); 10303454f01aSMaxime Ripard int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output); 10313454f01aSMaxime Ripard u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo); 1032ee6965c8SMaxime Ripard int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); 1033eeb6ab46SMaxime Ripard void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state); 1034ee6965c8SMaxime Ripard void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state); 1035ee6965c8SMaxime Ripard void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state); 1036ee6965c8SMaxime Ripard void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); 10373454f01aSMaxime Ripard void vc4_hvs_dump_state(struct vc4_hvs *hvs); 10383454f01aSMaxime Ripard void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel); 10393454f01aSMaxime Ripard void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel); 1040445b287eSMaxime Ripard int vc4_hvs_debugfs_init(struct drm_minor *minor); 1041c8b75bcaSEric Anholt 1042c8b75bcaSEric Anholt /* vc4_kms.c */ 1043c8b75bcaSEric Anholt int vc4_kms_load(struct drm_device *dev); 1044c8b75bcaSEric Anholt 1045c8b75bcaSEric Anholt /* vc4_plane.c */ 1046c8b75bcaSEric Anholt struct drm_plane *vc4_plane_init(struct drm_device *dev, 104777c5fb12SMaxime Ripard enum drm_plane_type type, 104877c5fb12SMaxime Ripard uint32_t possible_crtcs); 10490c2a50f1SMaxime Ripard int vc4_plane_create_additional_planes(struct drm_device *dev); 1050c8b75bcaSEric Anholt u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 10512f196b7cSDaniel Vetter u32 vc4_plane_dlist_size(const struct drm_plane_state *state); 1052b501baccSEric Anholt void vc4_plane_async_set_fb(struct drm_plane *plane, 
1053b501baccSEric Anholt struct drm_framebuffer *fb); 1054463873d5SEric Anholt 1055d3f5168aSEric Anholt /* vc4_v3d.c */ 1056d3f5168aSEric Anholt extern struct platform_driver vc4_v3d_driver; 1057ffc26740SEric Anholt extern const struct of_device_id vc4_v3d_dt_match[]; 1058553c942fSEric Anholt int vc4_v3d_get_bin_slot(struct vc4_dev *vc4); 105935c8b4b2SPaul Kocialkowski int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used); 106035c8b4b2SPaul Kocialkowski void vc4_v3d_bin_bo_put(struct vc4_dev *vc4); 1061cb74f6eeSEric Anholt int vc4_v3d_pm_get(struct vc4_dev *vc4); 1062cb74f6eeSEric Anholt void vc4_v3d_pm_put(struct vc4_dev *vc4); 1063445b287eSMaxime Ripard int vc4_v3d_debugfs_init(struct drm_minor *minor); 1064d5b1a78aSEric Anholt 1065d5b1a78aSEric Anholt /* vc4_validate.c */ 1066d5b1a78aSEric Anholt int 1067d5b1a78aSEric Anholt vc4_validate_bin_cl(struct drm_device *dev, 1068d5b1a78aSEric Anholt void *validated, 1069d5b1a78aSEric Anholt void *unvalidated, 1070d5b1a78aSEric Anholt struct vc4_exec_info *exec); 1071d5b1a78aSEric Anholt 1072d5b1a78aSEric Anholt int 1073d5b1a78aSEric Anholt vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec); 1074d5b1a78aSEric Anholt 10754a83c26aSDanilo Krummrich struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec, 1076d5b1a78aSEric Anholt uint32_t hindex); 1077d5b1a78aSEric Anholt 1078d5b1a78aSEric Anholt int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec); 1079d5b1a78aSEric Anholt 1080d5b1a78aSEric Anholt bool vc4_check_tex_size(struct vc4_exec_info *exec, 10814a83c26aSDanilo Krummrich struct drm_gem_dma_object *fbo, 1082d5b1a78aSEric Anholt uint32_t offset, uint8_t tiling_format, 1083d5b1a78aSEric Anholt uint32_t width, uint32_t height, uint8_t cpp); 1084d3f5168aSEric Anholt 1085463873d5SEric Anholt /* vc4_validate_shader.c */ 1086463873d5SEric Anholt struct vc4_validated_shader_info * 10874a83c26aSDanilo Krummrich vc4_validate_shader(struct drm_gem_dma_object *shader_obj); 
108865101d8cSBoris Brezillon 108965101d8cSBoris Brezillon /* vc4_perfmon.c */ 109065101d8cSBoris Brezillon void vc4_perfmon_get(struct vc4_perfmon *perfmon); 109165101d8cSBoris Brezillon void vc4_perfmon_put(struct vc4_perfmon *perfmon); 109265101d8cSBoris Brezillon void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon); 109365101d8cSBoris Brezillon void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, 109465101d8cSBoris Brezillon bool capture); 109565101d8cSBoris Brezillon struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id); 109665101d8cSBoris Brezillon void vc4_perfmon_open_file(struct vc4_file *vc4file); 109765101d8cSBoris Brezillon void vc4_perfmon_close_file(struct vc4_file *vc4file); 109865101d8cSBoris Brezillon int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, 109965101d8cSBoris Brezillon struct drm_file *file_priv); 110065101d8cSBoris Brezillon int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, 110165101d8cSBoris Brezillon struct drm_file *file_priv); 110265101d8cSBoris Brezillon int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, 110365101d8cSBoris Brezillon struct drm_file *file_priv); 11046a88752cSMaxime Ripard 11056a88752cSMaxime Ripard #endif /* _VC4_DRV_H_ */ 1106