#ifndef _DRM_DEVICE_H_
#define _DRM_DEVICE_H_

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/sched.h>

#include <drm/drm_mode_config.h>

struct drm_driver;
struct drm_minor;
struct drm_master;
struct drm_vblank_crtc;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;

struct inode;

struct pci_dev;
struct pci_controller;

/*
 * Recovery methods for a wedged device, in order of least to most side effects.
 * To be used with drm_dev_wedged_event() as recovery @method. Callers can
 * use any one, multiple (or'd) or none depending on their needs.
 *
 * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
 * details.
 */
#define DRM_WEDGE_RECOVERY_NONE		BIT(0)	/* optional telemetry collection */
#define DRM_WEDGE_RECOVERY_REBIND	BIT(1)	/* unbind + bind driver */
#define DRM_WEDGE_RECOVERY_BUS_RESET	BIT(2)	/* unbind + reset bus device + bind */
#define DRM_WEDGE_RECOVERY_VENDOR	BIT(3)	/* vendor specific recovery method */
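
/*
 * Illustrative sketch, not part of the original header: a driver that has
 * detected an unrecoverable error could report it with the flags above, e.g.
 *
 *	drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND |
 *				  DRM_WEDGE_RECOVERY_BUS_RESET, NULL);
 *
 * drm_dev_wedged_event() itself is declared in drm_drv.h; the trailing
 * drm_wedge_task_info pointer (NULL when no guilty task is known) is an
 * assumption based on the struct below and may not match every kernel version.
 */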

/**
 * struct drm_wedge_task_info - information about the guilty task of a wedged device
 */
struct drm_wedge_task_info {
	/** @pid: pid of the task */
	pid_t pid;
	/** @comm: command name of the task */
	char comm[TASK_COMM_LEN];
};

/**
 * enum switch_power_state - power state of drm device
 */
enum switch_power_state {
	/** @DRM_SWITCH_POWER_ON: Power state is ON */
	DRM_SWITCH_POWER_ON = 0,

	/** @DRM_SWITCH_POWER_OFF: Power state is OFF */
	DRM_SWITCH_POWER_OFF = 1,

	/** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
	DRM_SWITCH_POWER_CHANGING = 2,

	/** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
	DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
};

/**
 * struct drm_device - DRM device structure
 *
 * This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
	/** @if_version: Highest interface version set */
	int if_version;

	/** @ref: Object ref-count */
	struct kref ref;

	/** @dev: Device structure of bus-device */
	struct device *dev;

	/**
	 * @dma_dev:
	 *
	 * Device for DMA operations. Only required if the device @dev
	 * cannot perform DMA by itself. Should be NULL otherwise. Call
	 * drm_dev_dma_dev() to get the DMA device instead of using this
	 * field directly. Call drm_dev_set_dma_dev() to set this field.
	 *
	 * DRM devices are sometimes bound to virtual devices that cannot
	 * perform DMA by themselves. Drivers should set this field to the
	 * respective DMA controller.
	 *
	 * Devices on USB and other peripheral busses also cannot perform
	 * DMA by themselves. The @dma_dev field should point to the bus
	 * controller that does DMA on behalf of such a device. Required
	 * for importing buffers via dma-buf.
	 *
	 * If set, the DRM core automatically releases the reference on the
	 * device.
	 */
	struct device *dma_dev;
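
	/*
	 * Illustrative sketch, not part of the original header: a driver bound
	 * to a virtual or USB device would forward DMA to the device that can
	 * actually perform it, e.g. during probe:
	 *
	 *	drm_dev_set_dma_dev(drm, dma_capable_dev);
	 *
	 * where dma_capable_dev is a placeholder name for the bus or DMA
	 * controller's struct device.
	 */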

	/**
	 * @managed:
	 *
	 * Managed resources linked to the lifetime of this &drm_device as
	 * tracked by @ref.
	 */
	struct {
		/** @managed.resources: managed resources list */
		struct list_head resources;
		/** @managed.final_kfree: pointer for final kfree() call */
		void *final_kfree;
		/** @managed.lock: protects @managed.resources */
		spinlock_t lock;
	} managed;

	/** @driver: DRM driver managing the device */
	const struct drm_driver *driver;

	/**
	 * @dev_private:
	 *
	 * DRM driver private data. This is deprecated and should be left set to
	 * NULL.
	 *
	 * Instead of using this pointer it is recommended that drivers use
	 * devm_drm_dev_alloc() and embed struct &drm_device in their larger
	 * per-device structure.
	 */
	void *dev_private;
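
	/*
	 * Illustrative sketch of the recommended alternative (driver names are
	 * hypothetical): embed &struct drm_device and allocate both together:
	 *
	 *	struct my_device {
	 *		struct drm_device drm;
	 *		void __iomem *mmio;
	 *	};
	 *
	 *	struct my_device *my;
	 *
	 *	my = devm_drm_dev_alloc(parent, &my_drm_driver,
	 *				struct my_device, drm);
	 *	if (IS_ERR(my))
	 *		return PTR_ERR(my);
	 *
	 * Private data is then reached via container_of() on the embedded
	 * &struct drm_device instead of through @dev_private.
	 */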

	/**
	 * @primary:
	 *
	 * Primary node. Drivers should not interact with this directly.
	 * debugfs interfaces can be registered with drm_debugfs_add_file(),
	 * and sysfs files should be added directly to the hardware
	 * struct device @dev (not the character device node).
	 */
	struct drm_minor *primary;
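
	/*
	 * Illustrative sketch (hypothetical show callback): instead of
	 * touching @primary, debugfs entries are registered against the
	 * device itself:
	 *
	 *	static int my_state_show(struct seq_file *m, void *data)
	 *	{
	 *		seq_puts(m, "ok\n");
	 *		return 0;
	 *	}
	 *
	 *	drm_debugfs_add_file(drm, "my_state", my_state_show, NULL);
	 *
	 * drm_debugfs_add_file() is declared in drm_debugfs.h.
	 */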

	/**
	 * @render:
	 *
	 * Render node. Drivers should not interact with this directly ever.
	 * Drivers should not expose any additional interfaces in debugfs or
	 * sysfs on this node.
	 */
	struct drm_minor *render;

	/** @accel: Compute Acceleration node */
	struct drm_minor *accel;

	/**
	 * @registered:
	 *
	 * Internally used by drm_dev_register() and drm_connector_register().
	 */
	bool registered;

	/**
	 * @master:
	 *
	 * Currently active master for this device.
	 * Protected by &master_mutex
	 */
	struct drm_master *master;

	/**
	 * @driver_features: per-device driver features
	 *
	 * Drivers can clear specific flags here to disallow
	 * certain features on a per-device basis while still
	 * sharing a single &struct drm_driver instance across
	 * all devices.
	 */
	u32 driver_features;

	/**
	 * @unplugged:
	 *
	 * Flag to tell if the device has been unplugged.
	 * See drm_dev_enter() and drm_dev_is_unplugged().
	 */
	bool unplugged;

	/** @anon_inode: inode for private address-space */
	struct inode *anon_inode;

	/** @unique: Unique name of the device */
	char *unique;

	/**
	 * @master_mutex:
	 *
	 * Lock for &drm_minor.master and &drm_file.is_master
	 */
	struct mutex master_mutex;

	/**
	 * @open_count:
	 *
	 * Usage counter for outstanding files open,
	 * protected by drm_global_mutex
	 */
	atomic_t open_count;

	/** @filelist_mutex: Protects @filelist. */
	struct mutex filelist_mutex;
	/**
	 * @filelist:
	 *
	 * List of userspace clients, linked through &drm_file.lhead.
	 */
	struct list_head filelist;

	/**
	 * @filelist_internal:
	 *
	 * List of open DRM files for in-kernel clients.
	 * Protected by &filelist_mutex.
	 */
	struct list_head filelist_internal;

	/**
	 * @clientlist_mutex:
	 *
	 * Protects &clientlist access.
	 */
	struct mutex clientlist_mutex;

	/**
	 * @clientlist:
	 *
	 * List of in-kernel clients. Protected by &clientlist_mutex.
	 */
	struct list_head clientlist;

	/**
	 * @client_sysrq_list:
	 *
	 * Entry into list of devices registered for sysrq. Allows in-kernel
	 * clients on this device to handle sysrq keys.
	 */
	struct list_head client_sysrq_list;

	/**
	 * @vblank_disable_immediate:
	 *
	 * If true, vblank interrupt will be disabled immediately when the
	 * refcount drops to zero, as opposed to via the vblank disable
	 * timer.
	 *
	 * This can be set to true if the hardware has a working vblank counter
	 * with high-precision timestamping (otherwise there are races) and the
	 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
	 * appropriately. Also, see @max_vblank_count,
	 * &drm_crtc_funcs.get_vblank_counter and
	 * &drm_vblank_crtc_config.disable_immediate.
	 */
	bool vblank_disable_immediate;

	/**
	 * @vblank:
	 *
	 * Array of vblank tracking structures, one per &struct drm_crtc. For
	 * historical reasons (vblank support predates kernel modesetting) this
	 * is free-standing and not part of &struct drm_crtc itself. It must be
	 * initialized explicitly by calling drm_vblank_init().
	 */
	struct drm_vblank_crtc *vblank;
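
	/*
	 * Illustrative sketch, not part of the original header: drivers size
	 * this array during init, typically right after mode-setting setup:
	 *
	 *	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	 *	if (ret)
	 *		return ret;
	 *
	 * drm_vblank_init() is declared in drm_vblank.h.
	 */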

	/**
	 * @vblank_time_lock:
	 *
	 * Protects vblank count and time updates during vblank enable/disable
	 */
	spinlock_t vblank_time_lock;
	/**
	 * @vbl_lock: Top-level vblank references lock, wraps the low-level
	 * @vblank_time_lock.
	 */
	spinlock_t vbl_lock;

	/**
	 * @max_vblank_count:
	 *
	 * Maximum value of the vblank registers. This value +1 will result in a
	 * wrap-around of the vblank register. It is used by the vblank core to
	 * handle wrap-arounds.
	 *
	 * If set to zero the vblank core will try to guess the elapsed vblanks
	 * between times when the vblank interrupt is disabled through
	 * high-precision timestamps. That approach suffers from small
	 * races and imprecision over longer time periods, hence exposing a
	 * hardware vblank counter is always recommended.
	 *
	 * This is the statically configured device wide maximum. The driver
	 * can instead choose to use a runtime configurable per-crtc value
	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
	 * to use the per-crtc value.
	 *
	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
	 */
	u32 max_vblank_count;
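
	/*
	 * Illustrative sketch of the per-CRTC alternative mentioned above
	 * (the register width is a made-up example): with @max_vblank_count
	 * left at zero, a driver whose hardware counter wraps at 24 bits
	 * would call
	 *
	 *	drm_crtc_set_max_vblank_count(crtc, 0xffffff);
	 *
	 * during CRTC setup. drm_crtc_set_max_vblank_count() is declared in
	 * drm_vblank.h.
	 */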

	/** @vblank_event_list: List of vblank events */
	struct list_head vblank_event_list;

	/**
	 * @event_lock:
	 *
	 * Protects @vblank_event_list and event delivery in
	 * general. See drm_send_event() and drm_send_event_locked().
	 */
	spinlock_t event_lock;

	/** @num_crtcs: Number of CRTCs on this device */
	unsigned int num_crtcs;

	/** @mode_config: Current mode config */
	struct drm_mode_config mode_config;

	/** @object_name_lock: Protects @object_name_idr. */
	struct mutex object_name_lock;

	/** @object_name_idr: IDR of GEM object flink names. */
	struct idr object_name_idr;

	/** @vma_offset_manager: Manages mmap offsets for GEM objects. */
	struct drm_vma_offset_manager *vma_offset_manager;

	/** @vram_mm: VRAM MM memory manager */
	struct drm_vram_mm *vram_mm;

	/**
	 * @switch_power_state:
	 *
	 * Power state of the client.
	 * Used by drivers supporting the switcheroo driver.
	 * The state is maintained in the
	 * &vga_switcheroo_client_ops.set_gpu_state callback
	 */
	enum switch_power_state switch_power_state;

	/**
	 * @fb_helper:
	 *
	 * Pointer to the fbdev emulation structure.
	 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
	 */
	struct drm_fb_helper *fb_helper;

	/**
	 * @debugfs_root:
	 *
	 * Root directory for debugfs files.
	 */
	struct dentry *debugfs_root;
};

void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);

/**
 * drm_dev_dma_dev - returns the DMA device for a DRM device
 * @dev: DRM device
 *
 * Returns the DMA device of the given DRM device. By default, this is
 * the DRM device's parent. See drm_dev_set_dma_dev().
 *
 * Returns:
 * A DMA-capable device for the DRM device.
 */
static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
{
	if (dev->dma_dev)
		return dev->dma_dev;
	return dev->dev;
}
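
/*
 * Illustrative sketch (hypothetical call site): importing code would pass the
 * returned device wherever a DMA-capable struct device is needed, e.g.
 *
 *	attach = dma_buf_attach(dma_buf, drm_dev_dma_dev(drm));
 *
 * dma_buf_attach() comes from linux/dma-buf.h; the variable names are assumed.
 */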

#endif