#ifndef _DRM_DEVICE_H_
#define _DRM_DEVICE_H_

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/sched.h>

#include <drm/drm_mode_config.h>

struct drm_driver;
struct drm_minor;
struct drm_master;
struct drm_vblank_crtc;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;

struct inode;

struct pci_dev;
struct pci_controller;

/*
 * Recovery methods for a wedged device, in order of least to most side effects.
 * To be used with drm_dev_wedged_event() as recovery @method. Callers can
 * use any one, multiple (OR'd together) or none, depending on their needs.
 *
 * Refer to the "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for
 * more details.
 */
#define DRM_WEDGE_RECOVERY_NONE		BIT(0)	/* optional telemetry collection */
#define DRM_WEDGE_RECOVERY_REBIND	BIT(1)	/* unbind + bind driver */
#define DRM_WEDGE_RECOVERY_BUS_RESET	BIT(2)	/* unbind + reset bus device + bind */
#define DRM_WEDGE_RECOVERY_VENDOR	BIT(3)	/* vendor specific recovery method */
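
/*
 * Example: a minimal sketch of reporting a wedged device from a driver's
 * reset-failure path. The condition is hypothetical, and the three-argument
 * drm_dev_wedged_event() form taking an optional &struct drm_wedge_task_info
 * pointer is assumed here; see drm_drv.h for the actual prototype:
 *
 *	if (reset_failed)
 *		drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_REBIND |
 *					  DRM_WEDGE_RECOVERY_BUS_RESET, NULL);
 */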

/**
 * struct drm_wedge_task_info - information about the guilty task of a wedged device
 */
struct drm_wedge_task_info {
	/** @pid: pid of the task */
	pid_t pid;
	/** @comm: command name of the task */
	char comm[TASK_COMM_LEN];
};
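
/*
 * Example: a minimal sketch of filling in the task information for the
 * current task, e.g. before passing it to drm_dev_wedged_event(). The local
 * variable name is hypothetical; task_pid_nr() and get_task_comm() come from
 * linux/sched.h:
 *
 *	struct drm_wedge_task_info info;
 *
 *	info.pid = task_pid_nr(current);
 *	get_task_comm(info.comm, current);
 */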

/**
 * enum switch_power_state - power state of DRM device
 */
enum switch_power_state {
	/** @DRM_SWITCH_POWER_ON: Power state is ON */
	DRM_SWITCH_POWER_ON = 0,

	/** @DRM_SWITCH_POWER_OFF: Power state is OFF */
	DRM_SWITCH_POWER_OFF = 1,

	/** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
	DRM_SWITCH_POWER_CHANGING = 2,

	/** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
	DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
};

/**
 * struct drm_device - DRM device structure
 *
 * This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
	/** @if_version: Highest interface version set */
	int if_version;

	/** @ref: Object ref-count */
	struct kref ref;

	/** @dev: Device structure of bus-device */
	struct device *dev;

	/**
	 * @dma_dev:
	 *
	 * Device for DMA operations. Only required if the device @dev
	 * cannot perform DMA by itself. Should be NULL otherwise. Call
	 * drm_dev_dma_dev() to get the DMA device instead of using this
	 * field directly. Call drm_dev_set_dma_dev() to set this field.
	 *
	 * DRM devices are sometimes bound to virtual devices that cannot
	 * perform DMA by themselves. Drivers should set this field to the
	 * respective DMA controller.
	 *
	 * Devices on USB and other peripheral buses also cannot perform
	 * DMA by themselves. The @dma_dev field should point to the bus
	 * controller that does DMA on behalf of such a device. Required
	 * for importing buffers via dma-buf.
	 *
	 * If set, the DRM core automatically releases the reference on the
	 * device.
	 */
	struct device *dma_dev;

	/**
	 * @managed:
	 *
	 * Managed resources linked to the lifetime of this &drm_device as
	 * tracked by @ref.
	 */
	struct {
		/** @managed.resources: managed resources list */
		struct list_head resources;
		/** @managed.final_kfree: pointer for final kfree() call */
		void *final_kfree;
		/** @managed.lock: protects @managed.resources */
		spinlock_t lock;
	} managed;

	/** @driver: DRM driver managing the device */
	const struct drm_driver *driver;

	/**
	 * @dev_private:
	 *
	 * DRM driver private data. This is deprecated and should be left set to
	 * NULL.
	 *
	 * Instead of using this pointer, it is recommended that drivers use
	 * devm_drm_dev_alloc() and embed &struct drm_device in their larger
	 * per-device structure.
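	 *
	 * A minimal sketch of that recommended pattern (the driver structure,
	 * variable and &struct drm_driver names here are hypothetical)::
	 *
	 *	struct my_device {
	 *		struct drm_device drm;
	 *		void __iomem *mmio;
	 *	};
	 *
	 *	struct my_device *mdev;
	 *
	 *	mdev = devm_drm_dev_alloc(parent, &my_drm_driver,
	 *				  struct my_device, drm);
	 *	if (IS_ERR(mdev))
	 *		return PTR_ERR(mdev);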
	 */
	void *dev_private;

	/**
	 * @primary:
	 *
	 * Primary node. Drivers should not interact with this
	 * directly. debugfs interfaces can be registered with
	 * drm_debugfs_add_file(), and sysfs attributes should be added
	 * directly on the hardware &struct device @dev, not on the
	 * character device node.
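	 *
	 * A minimal sketch of registering a debugfs file through the DRM core
	 * from a driver's init path; the file name and show callback are
	 * hypothetical, see drm_debugfs_add_file() in drm_debugfs.h for the
	 * exact prototype::
	 *
	 *	static int my_stats_show(struct seq_file *m, void *data)
	 *	{
	 *		seq_puts(m, "example statistics\n");
	 *		return 0;
	 *	}
	 *
	 *	drm_debugfs_add_file(drm, "my_stats", my_stats_show, NULL);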
	 */
	struct drm_minor *primary;

	/**
	 * @render:
	 *
	 * Render node. Drivers should never interact with this directly.
	 * Drivers should not expose any additional interfaces in debugfs or
	 * sysfs on this node.
	 */
	struct drm_minor *render;

	/** @accel: Compute Acceleration node */
	struct drm_minor *accel;

	/**
	 * @registered:
	 *
	 * Internally used by drm_dev_register() and drm_connector_register().
	 */
	bool registered;

	/**
	 * @master:
	 *
	 * Currently active master for this device.
	 * Protected by &master_mutex
	 */
	struct drm_master *master;

	/**
	 * @driver_features: per-device driver features
	 *
	 * Drivers can clear specific flags here to disallow
	 * certain features on a per-device basis while still
	 * sharing a single &struct drm_driver instance across
	 * all devices.
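	 *
	 * A minimal sketch of disabling one feature for a single device while
	 * leaving the shared &struct drm_driver untouched; the drm pointer and
	 * the choice of DRIVER_ATOMIC are illustrative only::
	 *
	 *	drm->driver_features &= ~DRIVER_ATOMIC;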
	 */
	u32 driver_features;

	/**
	 * @unplugged:
	 *
	 * Flag to tell if the device has been unplugged.
	 * See drm_dev_enter() and drm_dev_is_unplugged().
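	 *
	 * A minimal sketch of guarding hardware access against a concurrent
	 * unplug with drm_dev_enter()/drm_dev_exit(); the guarded register
	 * write is hypothetical::
	 *
	 *	int idx;
	 *
	 *	if (!drm_dev_enter(drm, &idx))
	 *		return;
	 *
	 *	writel(0, mdev->mmio + MY_RESET_REG);
	 *
	 *	drm_dev_exit(idx);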
	 */
	bool unplugged;

	/** @anon_inode: inode for private address-space */
	struct inode *anon_inode;

	/** @unique: Unique name of the device */
	char *unique;

	/**
	 * @struct_mutex:
	 *
	 * Lock for others (not &drm_minor.master and &drm_file.is_master)
	 *
	 * TODO: This lock used to be the BKL of the DRM subsystem. Move the
	 *       lock into i915, which is the only remaining user.
	 */
	struct mutex struct_mutex;

	/**
	 * @master_mutex:
	 *
	 * Lock for &drm_minor.master and &drm_file.is_master
	 */
	struct mutex master_mutex;

	/**
	 * @open_count:
	 *
	 * Usage counter for outstanding files open,
	 * protected by drm_global_mutex
	 */
	atomic_t open_count;

	/** @filelist_mutex: Protects @filelist. */
	struct mutex filelist_mutex;
	/**
	 * @filelist:
	 *
	 * List of userspace clients, linked through &drm_file.lhead.
	 */
	struct list_head filelist;

	/**
	 * @filelist_internal:
	 *
	 * List of open DRM files for in-kernel clients.
	 * Protected by &filelist_mutex.
	 */
	struct list_head filelist_internal;

	/**
	 * @clientlist_mutex:
	 *
	 * Protects &clientlist access.
	 */
	struct mutex clientlist_mutex;

	/**
	 * @clientlist:
	 *
	 * List of in-kernel clients. Protected by &clientlist_mutex.
	 */
	struct list_head clientlist;

	/**
	 * @vblank_disable_immediate:
	 *
	 * If true, the vblank interrupt will be disabled immediately when the
	 * refcount drops to zero, as opposed to via the vblank disable
	 * timer.
	 *
	 * This can be set to true if the hardware has a working vblank counter
	 * with high-precision timestamping (otherwise there are races) and the
	 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
	 * appropriately. Also, see @max_vblank_count,
	 * &drm_crtc_funcs.get_vblank_counter and
	 * &drm_vblank_crtc_config.disable_immediate.
	 */
	bool vblank_disable_immediate;

	/**
	 * @vblank:
	 *
	 * Array of vblank tracking structures, one per &struct drm_crtc. For
	 * historical reasons (vblank support predates kernel modesetting) this
	 * is free-standing and not part of &struct drm_crtc itself. It must be
	 * initialized explicitly by calling drm_vblank_init().
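	 *
	 * A minimal sketch of setting the vblank structures up at init time;
	 * the number of CRTCs here is a hypothetical placeholder::
	 *
	 *	int ret;
	 *
	 *	ret = drm_vblank_init(drm, MY_NUM_CRTCS);
	 *	if (ret)
	 *		return ret;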
	 */
	struct drm_vblank_crtc *vblank;

	/**
	 * @vblank_time_lock:
	 *
	 * Protects vblank count and time updates during vblank enable/disable
	 */
	spinlock_t vblank_time_lock;
	/**
	 * @vbl_lock: Top-level vblank references lock, wraps the low-level
	 * @vblank_time_lock.
	 */
	spinlock_t vbl_lock;

	/**
	 * @max_vblank_count:
	 *
	 * Maximum value of the vblank registers. This value +1 will result in a
	 * wrap-around of the vblank register. It is used by the vblank core to
	 * handle wrap-arounds.
	 *
	 * If set to zero, the vblank core will try to guess the elapsed vblanks
	 * between times when the vblank interrupt is disabled through
	 * high-precision timestamps. That approach suffers from small
	 * races and imprecision over longer time periods, hence exposing a
	 * hardware vblank counter is always recommended.
	 *
	 * This is the statically configured device-wide maximum. The driver
	 * can instead choose to use a runtime configurable per-CRTC value
	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
	 * to use the per-CRTC value.
	 *
	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
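	 *
	 * A minimal sketch of the statically configured case, assuming a
	 * hypothetical 24-bit wide hardware counter::
	 *
	 *	drm->max_vblank_count = 0xffffff;
	 *
	 * With a runtime per-CRTC value, @max_vblank_count is instead left at
	 * zero and the driver calls::
	 *
	 *	drm_crtc_set_max_vblank_count(crtc, 0xffffff);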
	 */
	u32 max_vblank_count;

	/** @vblank_event_list: List of vblank events */
	struct list_head vblank_event_list;

	/**
	 * @event_lock:
	 *
	 * Protects @vblank_event_list and event delivery in
	 * general. See drm_send_event() and drm_send_event_locked().
	 */
	spinlock_t event_lock;

	/** @num_crtcs: Number of CRTCs on this device */
	unsigned int num_crtcs;

	/** @mode_config: Current mode config */
	struct drm_mode_config mode_config;

	/** @object_name_lock: Protects @object_name_idr */
	struct mutex object_name_lock;

	/** @object_name_idr: IDR of GEM objects with a global (flink) name */
	struct idr object_name_idr;

	/** @vma_offset_manager: GEM mmap-offset manager */
	struct drm_vma_offset_manager *vma_offset_manager;

	/** @vram_mm: VRAM MM memory manager */
	struct drm_vram_mm *vram_mm;

	/**
	 * @switch_power_state:
	 *
	 * Power state of the client.
	 * Used by drivers supporting the vga_switcheroo driver.
	 * The state is maintained in the
	 * &vga_switcheroo_client_ops.set_gpu_state callback.
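	 *
	 * A minimal sketch of such a callback updating this field; the driver
	 * and callback names are hypothetical, and the sketch assumes the PCI
	 * drvdata points at the &struct drm_device::
	 *
	 *	static void my_set_gpu_state(struct pci_dev *pdev,
	 *				     enum vga_switcheroo_state state)
	 *	{
	 *		struct drm_device *drm = pci_get_drvdata(pdev);
	 *
	 *		drm->switch_power_state = (state == VGA_SWITCHEROO_ON) ?
	 *			DRM_SWITCH_POWER_ON : DRM_SWITCH_POWER_OFF;
	 *	}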
	 */
	enum switch_power_state switch_power_state;

	/**
	 * @fb_helper:
	 *
	 * Pointer to the fbdev emulation structure.
	 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
	 */
	struct drm_fb_helper *fb_helper;

	/**
	 * @debugfs_root:
	 *
	 * Root directory for debugfs files.
	 */
	struct dentry *debugfs_root;
};

void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);

/**
 * drm_dev_dma_dev - returns the DMA device for a DRM device
 * @dev: DRM device
 *
 * Returns the DMA device of the given DRM device. By default, this is
 * the DRM device's parent. See drm_dev_set_dma_dev().
 *
 * Returns:
 * A DMA-capable device for the DRM device.
 */
static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
{
	if (dev->dma_dev)
		return dev->dma_dev;
	return dev->dev;
}
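
/*
 * Example: a minimal sketch of wiring up a separate DMA-capable device at
 * bind time and querying it later, e.g. when importing dma-buf buffers. The
 * dma_controller pointer and the surrounding driver code are hypothetical:
 *
 *	drm_dev_set_dma_dev(drm, dma_controller);
 *
 *	...
 *
 *	struct device *dma_dev = drm_dev_dma_dev(drm);
 */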

#endif