xref: /linux/include/drm/drm_device.h (revision 6e0b1b82017b9ba16b87685e1e4902cd9dc762d2)
1 #ifndef _DRM_DEVICE_H_
2 #define _DRM_DEVICE_H_
3 
4 #include <linux/list.h>
5 #include <linux/kref.h>
6 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7 #include <linux/mount.h>
8 #endif
9 #include <linux/mutex.h>
10 #include <linux/idr.h>
11 #include <linux/sched.h>
12 
13 #include <drm/drm_mode_config.h>
14 
15 struct drm_driver;
16 struct drm_minor;
17 struct drm_master;
18 struct drm_vblank_crtc;
19 struct drm_vma_offset_manager;
20 struct drm_vram_mm;
21 struct drm_fb_helper;
22 
23 struct inode;
24 
25 struct pci_dev;
26 struct pci_controller;
27 
28 /*
29  * Recovery methods for a wedged device, in order of least to most side-effects.
30  * To be used with drm_dev_wedged_event() as recovery @method. Callers can
31  * use any one, multiple (or'd) or none depending on their needs.
32  *
33  * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
34  * details.
35  */
36 #define DRM_WEDGE_RECOVERY_NONE		BIT(0)	/* optional telemetry collection */
37 #define DRM_WEDGE_RECOVERY_REBIND	BIT(1)	/* unbind + bind driver */
38 #define DRM_WEDGE_RECOVERY_BUS_RESET	BIT(2)	/* unbind + reset bus device + bind */
39 #define DRM_WEDGE_RECOVERY_VENDOR	BIT(3)	/* vendor specific recovery method */
40 
41 /**
42  * struct drm_wedge_task_info - information about the guilty task of a wedged device
43  */
44 struct drm_wedge_task_info {
45 	/** @pid: pid of the task */
46 	pid_t pid;
47 	/** @comm: command name of the task */
48 	char comm[TASK_COMM_LEN];
49 };
50 
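/*
 * Example (illustrative sketch): a driver's reset-failure path might report a
 * wedged device with an or'd recovery mask and, when known, the guilty task.
 * The three-argument drm_dev_wedged_event() form shown here is an assumption;
 * check the current prototype in drm_drv.h.
 *
 *	unsigned long method = DRM_WEDGE_RECOVERY_REBIND |
 *			       DRM_WEDGE_RECOVERY_BUS_RESET;
 *
 *	drm_dev_wedged_event(drm, method, info);
 */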
51 /**
52  * enum switch_power_state - power state of drm device
53  */
54 
55 enum switch_power_state {
56 	/** @DRM_SWITCH_POWER_ON: Power state is ON */
57 	DRM_SWITCH_POWER_ON = 0,
58 
59 	/** @DRM_SWITCH_POWER_OFF: Power state is OFF */
60 	DRM_SWITCH_POWER_OFF = 1,
61 
62 	/** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
63 	DRM_SWITCH_POWER_CHANGING = 2,
64 
65 	/** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
66 	DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
67 };
68 
69 /**
70  * struct drm_device - DRM device structure
71  *
72  * This structure represents a complete card that
73  * may contain multiple heads.
74  */
75 struct drm_device {
76 	/** @if_version: Highest interface version set */
77 	int if_version;
78 
79 	/** @ref: Object ref-count */
80 	struct kref ref;
81 
82 	/** @dev: Device structure of bus-device */
83 	struct device *dev;
84 
85 	/**
86 	 * @dma_dev:
87 	 *
88 	 * Device for DMA operations. Only required if the device @dev
89 	 * cannot perform DMA by itself. Should be NULL otherwise. Call
90 	 * drm_dev_dma_dev() to get the DMA device instead of using this
91 	 * field directly. Call drm_dev_set_dma_dev() to set this field.
92 	 *
93 	 * DRM devices are sometimes bound to virtual devices that cannot
94 	 * perform DMA by themselves. Drivers should set this field to the
95 	 * respective DMA controller.
96 	 *
97 	 * Devices on USB and other peripheral busses also cannot perform
98	 * DMA by themselves. The @dma_dev field should point to the bus
99	 * controller that does DMA on behalf of such a device. Required
100 	 * for importing buffers via dma-buf.
101 	 *
102 	 * If set, the DRM core automatically releases the reference on the
103 	 * device.
104 	 */
105 	struct device *dma_dev;
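	/*
	 * Example (illustrative sketch): a driver for a virtual or bus-attached
	 * device could point DMA operations at the DMA-capable controller
	 * during probe:
	 *
	 *	drm_dev_set_dma_dev(drm, controller_dev);
	 *
	 * where controller_dev is a hypothetical &struct device of the DMA
	 * controller. drm_dev_set_dma_dev() is declared at the end of this
	 * header.
	 */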
106 
107 	/**
108 	 * @managed:
109 	 *
110 	 * Managed resources linked to the lifetime of this &drm_device as
111 	 * tracked by @ref.
112 	 */
113 	struct {
114 		/** @managed.resources: managed resources list */
115 		struct list_head resources;
116 		/** @managed.final_kfree: pointer for final kfree() call */
117 		void *final_kfree;
118 		/** @managed.lock: protects @managed.resources */
119 		spinlock_t lock;
120 	} managed;
121 
122 	/** @driver: DRM driver managing the device */
123 	const struct drm_driver *driver;
124 
125 	/**
126 	 * @dev_private:
127 	 *
128 	 * DRM driver private data. This is deprecated and should be left set to
129 	 * NULL.
130 	 *
131 	 * Instead of using this pointer it is recommended that drivers use
132 	 * devm_drm_dev_alloc() and embed struct &drm_device in their larger
133 	 * per-device structure.
134 	 */
135 	void *dev_private;
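	/*
	 * Example (illustrative sketch, hypothetical driver names): instead of
	 * using @dev_private, embed the &drm_device and allocate it with
	 * devm_drm_dev_alloc():
	 *
	 *	struct my_device {
	 *		struct drm_device drm;
	 *		void __iomem *mmio;
	 *	};
	 *
	 *	my = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
	 *				struct my_device, drm);
	 *	if (IS_ERR(my))
	 *		return PTR_ERR(my);
	 */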
136 
137 	/**
138 	 * @primary:
139 	 *
140 	 * Primary node. Drivers should not interact with this
141 	 * directly. debugfs interfaces can be registered with
142	 * drm_debugfs_add_file(), and sysfs attributes should be added directly
143	 * to the hardware struct device @dev (not to the character device node).
144 	 */
145 	struct drm_minor *primary;
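	/*
	 * Example (illustrative sketch): a driver debugfs file is registered
	 * through drm_debugfs_add_file() rather than by touching @primary:
	 *
	 *	drm_debugfs_add_file(drm, "my_state", my_state_show, NULL);
	 *
	 * where my_state_show() is a hypothetical seq_file show callback.
	 */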
146 
147 	/**
148 	 * @render:
149 	 *
150 	 * Render node. Drivers should not interact with this directly ever.
151 	 * Drivers should not expose any additional interfaces in debugfs or
152 	 * sysfs on this node.
153 	 */
154 	struct drm_minor *render;
155 
156 	/** @accel: Compute Acceleration node */
157 	struct drm_minor *accel;
158 
159 	/**
160 	 * @registered:
161 	 *
162 	 * Internally used by drm_dev_register() and drm_connector_register().
163 	 */
164 	bool registered;
165 
166 	/**
167 	 * @master:
168 	 *
169 	 * Currently active master for this device.
170 	 * Protected by &master_mutex
171 	 */
172 	struct drm_master *master;
173 
174 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
175 	/**
176 	 * @huge_mnt:
177 	 *
178	 * Huge tmpfs mountpoint used at GEM object initialization in
179 	 * drm_gem_object_init(). Drivers can call drm_gem_huge_mnt_create() to
180 	 * create, mount and use it. The default tmpfs mountpoint (`shm_mnt`) is
181 	 * used if NULL.
182 	 */
183 	struct vfsmount *huge_mnt;
184 #endif
185 
186 	/**
187 	 * @driver_features: per-device driver features
188 	 *
189 	 * Drivers can clear specific flags here to disallow
190 	 * certain features on a per-device basis while still
191 	 * sharing a single &struct drm_driver instance across
192 	 * all devices.
193 	 */
194 	u32 driver_features;
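	/*
	 * Example (illustrative sketch): a driver can mask off a feature for a
	 * single device at probe time; shared code then checks it with
	 * drm_core_check_feature():
	 *
	 *	drm->driver_features &= ~DRIVER_ATOMIC;
	 *
	 *	if (drm_core_check_feature(drm, DRIVER_ATOMIC))
	 *		...
	 */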
195 
196 	/**
197 	 * @unplugged:
198 	 *
199 	 * Flag to tell if the device has been unplugged.
200 	 * See drm_dev_enter() and drm_dev_is_unplugged().
201 	 */
202 	bool unplugged;
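	/*
	 * Example (illustrative sketch): hardware access paths that can race
	 * with device removal usually bracket the access with drm_dev_enter()
	 * and drm_dev_exit() instead of reading @unplugged directly:
	 *
	 *	int idx;
	 *
	 *	if (!drm_dev_enter(drm, &idx))
	 *		return -ENODEV;
	 *	...access hardware...
	 *	drm_dev_exit(idx);
	 */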
203 
204 	/** @anon_inode: inode for private address-space */
205 	struct inode *anon_inode;
206 
207 	/** @unique: Unique name of the device */
208 	char *unique;
209 
210 	/**
211 	 * @master_mutex:
212 	 *
213 	 * Lock for &drm_minor.master and &drm_file.is_master
214 	 */
215 	struct mutex master_mutex;
216 
217 	/**
218 	 * @open_count:
219 	 *
220 	 * Usage counter for outstanding files open,
221 	 * protected by drm_global_mutex
222 	 */
223 	atomic_t open_count;
224 
225 	/** @filelist_mutex: Protects @filelist. */
226 	struct mutex filelist_mutex;
227 	/**
228 	 * @filelist:
229 	 *
230 	 * List of userspace clients, linked through &drm_file.lhead.
231 	 */
232 	struct list_head filelist;
233 
234 	/**
235 	 * @filelist_internal:
236 	 *
237 	 * List of open DRM files for in-kernel clients.
238 	 * Protected by &filelist_mutex.
239 	 */
240 	struct list_head filelist_internal;
241 
242 	/**
243 	 * @clientlist_mutex:
244 	 *
245 	 * Protects &clientlist access.
246 	 */
247 	struct mutex clientlist_mutex;
248 
249 	/**
250 	 * @clientlist:
251 	 *
252 	 * List of in-kernel clients. Protected by &clientlist_mutex.
253 	 */
254 	struct list_head clientlist;
255 
256 	/**
257 	 * @client_sysrq_list:
258 	 *
259 	 * Entry into list of devices registered for sysrq. Allows in-kernel
260 	 * clients on this device to handle sysrq keys.
261 	 */
262 	struct list_head client_sysrq_list;
263 
264 	/**
265 	 * @vblank_disable_immediate:
266 	 *
267 	 * If true, vblank interrupt will be disabled immediately when the
268 	 * refcount drops to zero, as opposed to via the vblank disable
269 	 * timer.
270 	 *
271	 * This can be set to true if the hardware has a working vblank counter
272 	 * with high-precision timestamping (otherwise there are races) and the
273 	 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
274 	 * appropriately. Also, see @max_vblank_count,
275 	 * &drm_crtc_funcs.get_vblank_counter and
276 	 * &drm_vblank_crtc_config.disable_immediate.
277 	 */
278 	bool vblank_disable_immediate;
279 
280 	/**
281 	 * @vblank:
282 	 *
283 	 * Array of vblank tracking structures, one per &struct drm_crtc. For
284 	 * historical reasons (vblank support predates kernel modesetting) this
285 	 * is free-standing and not part of &struct drm_crtc itself. It must be
286 	 * initialized explicitly by calling drm_vblank_init().
287 	 */
288 	struct drm_vblank_crtc *vblank;
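	/*
	 * Example (illustrative sketch): modesetting drivers typically allocate
	 * this array during initialization, once the number of CRTCs is known:
	 *
	 *	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	 *	if (ret)
	 *		return ret;
	 */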
289 
290 	/**
291 	 * @vblank_time_lock:
292 	 *
293 	 *  Protects vblank count and time updates during vblank enable/disable
294 	 */
295 	spinlock_t vblank_time_lock;
296 	/**
297 	 * @vbl_lock: Top-level vblank references lock, wraps the low-level
298 	 * @vblank_time_lock.
299 	 */
300 	spinlock_t vbl_lock;
301 
302 	/**
303 	 * @max_vblank_count:
304 	 *
305 	 * Maximum value of the vblank registers. This value +1 will result in a
306 	 * wrap-around of the vblank register. It is used by the vblank core to
307 	 * handle wrap-arounds.
308 	 *
309 	 * If set to zero the vblank core will try to guess the elapsed vblanks
310 	 * between times when the vblank interrupt is disabled through
311	 * high-precision timestamps. That approach suffers from small
312 	 * races and imprecision over longer time periods, hence exposing a
313 	 * hardware vblank counter is always recommended.
314 	 *
315 	 * This is the statically configured device wide maximum. The driver
316 	 * can instead choose to use a runtime configurable per-crtc value
317 	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
318 	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
319 	 * to use the per-crtc value.
320 	 *
321 	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
322 	 */
323 	u32 max_vblank_count;
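	/*
	 * Example (illustrative sketch): a driver whose counter width varies by
	 * hardware generation may leave @max_vblank_count at zero and set a
	 * per-CRTC limit instead:
	 *
	 *	drm_crtc_set_max_vblank_count(crtc, 0xffffff);
	 */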
324 
325 	/** @vblank_event_list: List of vblank events */
326 	struct list_head vblank_event_list;
327 
328 	/**
329 	 * @event_lock:
330 	 *
331 	 * Protects @vblank_event_list and event delivery in
332 	 * general. See drm_send_event() and drm_send_event_locked().
333 	 */
334 	spinlock_t event_lock;
335 
336 	/** @num_crtcs: Number of CRTCs on this device */
337 	unsigned int num_crtcs;
338 
339 	/** @mode_config: Current mode config */
340 	struct drm_mode_config mode_config;
341 
342 	/** @object_name_lock: GEM information */
343 	struct mutex object_name_lock;
344 
345 	/** @object_name_idr: GEM information */
346 	struct idr object_name_idr;
347 
348 	/** @vma_offset_manager: GEM information */
349 	struct drm_vma_offset_manager *vma_offset_manager;
350 
351 	/** @vram_mm: VRAM MM memory manager */
352 	struct drm_vram_mm *vram_mm;
353 
354 	/**
355 	 * @switch_power_state:
356 	 *
357 	 * Power state of the client.
358 	 * Used by drivers supporting the switcheroo driver.
359 	 * The state is maintained in the
360 	 * &vga_switcheroo_client_ops.set_gpu_state callback
361 	 */
362 	enum switch_power_state switch_power_state;
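	/*
	 * Example (illustrative sketch): a switcheroo-aware driver updates this
	 * field from its &vga_switcheroo_client_ops.set_gpu_state callback:
	 *
	 *	static void my_set_gpu_state(struct pci_dev *pdev,
	 *				     enum vga_switcheroo_state state)
	 *	{
	 *		struct drm_device *drm = pci_get_drvdata(pdev);
	 *
	 *		if (state == VGA_SWITCHEROO_ON) {
	 *			drm->switch_power_state = DRM_SWITCH_POWER_CHANGING;
	 *			...resume the device...
	 *			drm->switch_power_state = DRM_SWITCH_POWER_ON;
	 *		} else {
	 *			drm->switch_power_state = DRM_SWITCH_POWER_OFF;
	 *		}
	 *	}
	 *
	 * Names prefixed with my_ are hypothetical.
	 */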
363 
364 	/**
365 	 * @fb_helper:
366 	 *
367 	 * Pointer to the fbdev emulation structure.
368 	 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
369 	 */
370 	struct drm_fb_helper *fb_helper;
371 
372 	/**
373 	 * @debugfs_root:
374 	 *
375 	 * Root directory for debugfs files.
376 	 */
377 	struct dentry *debugfs_root;
378 };
379 
380 void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
381 
382 /**
383  * drm_dev_dma_dev - returns the DMA device for a DRM device
384  * @dev: DRM device
385  *
386  * Returns the DMA device of the given DRM device. By default, this is
387  * the DRM device's parent. See drm_dev_set_dma_dev().
388  *
389  * Returns:
390  * A DMA-capable device for the DRM device.
391  */
392 static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
393 {
394 	if (dev->dma_dev)
395 		return dev->dma_dev;
396 	return dev->dev;
397 }
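/*
 * Example (illustrative sketch): dma-buf import paths would attach with the
 * DMA-capable device rather than with the DRM device's own struct device:
 *
 *	attach = dma_buf_attach(dma_buf, drm_dev_dma_dev(drm));
 */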
398 
399 #endif
400