/*
 * Header for the Direct Rendering Manager
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#if defined(__KERNEL__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#elif defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t   __s8;
typedef uint8_t  __u8;
typedef int16_t  __s16;
typedef uint16_t __u16;
typedef int32_t  __s32;
typedef uint32_t __u32;
typedef int64_t  __s64;
typedef uint64_t __u64;
typedef size_t   __kernel_size_t;
typedef unsigned long drm_handle_t;

#endif

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/*
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/*
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/*
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/*
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/*
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	__kernel_size_t name_len;	  /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
	__kernel_size_t date_len;	  /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
	__kernel_size_t desc_len;	  /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
};
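
/*
 * Illustrative sketch (not part of the UAPI): querying the driver version
 * with DRM_IOCTL_VERSION. The usual pattern is two calls -- the first with
 * NULL buffers to learn the string lengths, the second with buffers of that
 * size. Assumes an open DRM fd, <sys/ioctl.h>, <stdio.h> and <stdlib.h>;
 * error handling is omitted.
 *
 *	struct drm_version v = {0};
 *
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// fills the length fields only
 *	v.name = malloc(v.name_len + 1);
 *	v.date = malloc(v.date_len + 1);
 *	v.desc = malloc(v.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &v);	// copies the strings
 *	v.name[v.name_len] = '\0';
 *	printf("driver %s %d.%d.%d\n", v.name,
 *	       v.version_major, v.version_minor, v.version_patchlevel);
 */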

/*
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	__kernel_size_t unique_len;	  /**< Length of unique */
	char __user *unique;	  /**< Unique name for driver instantiation */
};

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/*
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/*
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
};

/*
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};

/*
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/*   Private data */
};

/*
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024 bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	    /* Add to the *END* of the list */
};

/*
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/*
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/*
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/*
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/*
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/*
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc __user *list;
};

/*
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/*
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/*
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#ifdef __cplusplus
	void __user *virt;
#else
	void __user *virtual;		/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/*
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	  /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/*
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/*
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/*
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/*
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/*
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};

/*
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/*
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
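
/*
 * Illustrative sketch (not part of the UAPI): blocking until the next
 * vertical blank with DRM_IOCTL_WAIT_VBLANK. On success the reply half of
 * the union carries the new sequence number and a timestamp. Assumes an
 * open DRM fd; the first CRTC is used (no high-crtc bits set).
 *
 *	union drm_wait_vblank vbl = {0};
 *
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		// one vblank from now
 *	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
 *		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
 *		       vbl.reply.tval_sec, vbl.reply.tval_usec);
 */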

#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/*
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};

/*
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/*
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/*
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/*
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/*
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/*
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};

/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;
};

/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};

/**
 * DRM_CAP_DUMB_BUFFER
 *
 * If set to 1, the driver supports creating dumb buffers via the
 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
 */
#define DRM_CAP_DUMB_BUFFER		0x1
/**
 * DRM_CAP_VBLANK_HIGH_CRTC
 *
 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
 * in the high bits of &drm_wait_vblank_request.type.
 *
 * Starting from kernel version 2.6.39, this capability is always set to 1.
 */
#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
/**
 * DRM_CAP_DUMB_PREFERRED_DEPTH
 *
 * The preferred bit depth for dumb buffers.
 *
 * The bit depth is the number of bits used to indicate the color of a single
 * pixel excluding any padding. This is different from the number of bits per
 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
 * pixel.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
/**
 * DRM_CAP_DUMB_PREFER_SHADOW
 *
 * If set to 1, the driver prefers userspace to render to a shadow buffer
 * instead of directly rendering to a dumb buffer. For best speed, userspace
 * should do streaming ordered memory copies into the dumb buffer and never
 * read from it.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
/**
 * DRM_CAP_PRIME
 *
 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
 * and &DRM_PRIME_CAP_EXPORT.
 *
 * PRIME buffers are exposed as dma-buf file descriptors. See
 * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
 */
#define DRM_CAP_PRIME			0x5
/**
 * DRM_PRIME_CAP_IMPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
 */
#define  DRM_PRIME_CAP_IMPORT		0x1
/**
 * DRM_PRIME_CAP_EXPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
 */
#define  DRM_PRIME_CAP_EXPORT		0x2
/**
 * DRM_CAP_TIMESTAMP_MONOTONIC
 *
 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
 * clocks.
 *
 * Starting from kernel version 2.6.39, the default value for this capability
 * is 1. Starting from kernel version 4.15, this capability is always set to 1.
 */
#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
/**
 * DRM_CAP_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
 */
#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
/**
 * DRM_CAP_CURSOR_WIDTH
 *
 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
 * width x height combination for the hardware cursor. The intention is that a
 * hardware agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, eg. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH		0x8
/**
 * DRM_CAP_CURSOR_HEIGHT
 *
 * See &DRM_CAP_CURSOR_WIDTH.
 */
#define DRM_CAP_CURSOR_HEIGHT		0x9
/**
 * DRM_CAP_ADDFB2_MODIFIERS
 *
 * If set to 1, the driver supports supplying modifiers in the
 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
 */
#define DRM_CAP_ADDFB2_MODIFIERS	0x10
/**
 * DRM_CAP_PAGE_FLIP_TARGET
 *
 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
 * ioctl.
 */
#define DRM_CAP_PAGE_FLIP_TARGET	0x11
/**
 * DRM_CAP_CRTC_IN_VBLANK_EVENT
 *
 * If set to 1, the kernel supports reporting the CRTC ID in
 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
 * &DRM_EVENT_FLIP_COMPLETE events.
 *
 * Starting from kernel version 4.12, this capability is always set to 1.
 */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
/**
 * DRM_CAP_SYNCOBJ
 *
 * If set to 1, the driver supports sync objects. See
 * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
 */
#define DRM_CAP_SYNCOBJ		0x13
/**
 * DRM_CAP_SYNCOBJ_TIMELINE
 *
 * If set to 1, the driver supports timeline operations on sync objects. See
 * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
 */
#define DRM_CAP_SYNCOBJ_TIMELINE	0x14

/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;
	__u64 value;
};
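
/*
 * Illustrative sketch (not part of the UAPI): reading one of the DRM_CAP_*
 * values above with DRM_IOCTL_GET_CAP. Assumes an open DRM fd.
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };
 *
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 &&
 *	    (cap.value & DRM_PRIME_CAP_EXPORT))
 *		printf("driver can export dma-buf FDs\n");
 */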

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 3.13.
 */
#define DRM_CLIENT_CAP_STEREO_3D	1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 *
 * This capability has been introduced in kernel version 3.15. Starting from
 * kernel version 3.17, this capability is always supported for all drivers.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2

/**
 * DRM_CLIENT_CAP_ATOMIC
 *
 * If set to 1, the DRM core will expose atomic properties to userspace. This
 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
 * &DRM_CLIENT_CAP_ASPECT_RATIO.
 *
 * If the driver doesn't support atomic mode-setting, enabling this capability
 * will fail with -EOPNOTSUPP.
 *
 * This capability has been introduced in kernel version 4.0. Starting from
 * kernel version 4.2, this capability is always supported for atomic-capable
 * drivers.
 */
#define DRM_CLIENT_CAP_ATOMIC	3

/**
 * DRM_CLIENT_CAP_ASPECT_RATIO
 *
 * If set to 1, the DRM core will provide aspect ratio information in modes.
 * See ``DRM_MODE_FLAG_PIC_AR_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 4.18.
 */
#define DRM_CLIENT_CAP_ASPECT_RATIO    4

/**
 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing back to memory the scene setup in the commit. The client must enable
 * &DRM_CLIENT_CAP_ATOMIC first.
 *
 * This capability is always supported for atomic-capable drivers starting from
 * kernel version 4.19.
 */
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5

/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;
	__u64 value;
};
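
/*
 * Illustrative sketch (not part of the UAPI): opting in to atomic
 * mode-setting with DRM_IOCTL_SET_CLIENT_CAP. Enabling DRM_CLIENT_CAP_ATOMIC
 * implicitly enables universal planes and aspect-ratio modes, and fails with
 * EOPNOTSUPP if the driver is not atomic-capable. Assumes an open DRM fd,
 * <errno.h> and <string.h>.
 *
 *	struct drm_set_client_cap cc = {
 *		.capability = DRM_CLIENT_CAP_ATOMIC,
 *		.value = 1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cc) != 0)
 *		fprintf(stderr, "no atomic: %s\n", strerror(errno));
 */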

#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags; only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};

struct drm_syncobj_create {
	__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;
};

struct drm_syncobj_destroy {
	__u32 handle;
	__u32 pad;
};

#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
struct drm_syncobj_handle {
	__u32 handle;
	__u32 flags;

	__s32 fd;
	__u32 pad;
};

struct drm_syncobj_transfer {
	__u32 src_handle;
	__u32 dst_handle;
	__u64 src_point;
	__u64 dst_point;
	__u32 flags;
	__u32 pad;
};

#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};
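
/*
 * Illustrative sketch (not part of the UAPI): waiting for two sync objects
 * with DRM_IOCTL_SYNCOBJ_WAIT. "handles" carries a user pointer cast to
 * __u64 and timeout_nsec is an absolute timeout. Assumes an open DRM fd and
 * two previously created syncobj handles h0 and h1.
 *
 *	__u32 handles[2] = { h0, h1 };
 *	struct drm_syncobj_wait wait = {
 *		.handles = (__u64)(uintptr_t)handles,
 *		.count_handles = 2,
 *		.timeout_nsec = INT64_MAX,	// effectively wait forever
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */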

struct drm_syncobj_timeline_wait {
	__u64 handles;
	/* wait on a specific timeline point for each handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
};


struct drm_syncobj_array {
	__u64 handles;
	__u32 count_handles;
	__u32 pad;
};

#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;
	__u64 points;
	__u32 count_handles;
	__u32 flags;
};
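
/*
 * Illustrative sketch (not part of the UAPI): reading the last signaled
 * point of a timeline syncobj with DRM_IOCTL_SYNCOBJ_QUERY. "handles" and
 * "points" are user pointers cast to __u64; the kernel writes one point per
 * handle. Assumes an open DRM fd and a timeline syncobj handle h.
 *
 *	__u32 handle = h;
 *	__u64 point = 0;
 *	struct drm_syncobj_timeline_array args = {
 *		.handles = (__u64)(uintptr_t)&handle,
 *		.points = (__u64)(uintptr_t)&point,
 *		.count_handles = 1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args) == 0)
 *		printf("last signaled point: %llu\n", (unsigned long long)point);
 */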


/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id;		/* requested crtc_id */
	__u32 active;		/* return: crtc output is active */
	__u64 sequence;		/* return: most recent vblank sequence */
	__s64 sequence_ns;	/* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
	__u64 user_data;	/* user data passed to event */
};
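
/*
 * Illustrative sketch (not part of the UAPI): asking for a
 * DRM_EVENT_CRTC_SEQUENCE event two refresh cycles from now with
 * DRM_IOCTL_CRTC_QUEUE_SEQUENCE; the event is read back later from the DRM
 * fd (see struct drm_event below). Assumes an open DRM fd and a valid
 * crtc_id; the user_data value is made up.
 *
 *	struct drm_crtc_queue_sequence qs = {
 *		.crtc_id = crtc_id,
 *		.flags = DRM_CRTC_SEQUENCE_RELATIVE,
 *		.sequence = 2,			// two vblanks from now
 *		.user_data = 0xc0ffee,		// echoed back in the event
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &qs);
 */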

#if defined(__cplusplus)
}
#endif

#include "drm_mode.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
/**
 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
 *
 * GEM handles are not reference-counted by the kernel. User-space is
 * responsible for managing their lifetime. For example, if user-space imports
 * the same memory object twice on the same DRM file description, the same GEM
 * handle is returned by both imports, and user-space needs to ensure
 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
 * when a memory object is allocated, then exported and imported again on the
 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
 * and always returns fresh new GEM handles even if an existing GEM handle
 * already refers to the same memory object before the IOCTL is performed.
 */
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
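
/*
 * Illustrative sketch (not part of the UAPI): releasing a GEM handle with
 * DRM_IOCTL_GEM_CLOSE once all users in this process are done with it.
 * Because handles are not reference-counted by the kernel, user-space that
 * may import the same object several times typically keeps its own refcount
 * and only issues this ioctl when that count drops to zero. Assumes an open
 * DRM fd and a valid handle.
 *
 *	struct drm_gem_close close_args = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */
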
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

/**
 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
 *
 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
 * &drm_prime_handle.fd.
 *
 * The export can fail for any driver-specific reason, e.g. because export is
 * not supported for this specific GEM handle (but might be for others).
 *
 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
/**
 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
 *
 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
 * import, and gets back a GEM handle in &drm_prime_handle.handle.
 * &drm_prime_handle.flags is unused.
 *
 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
 * that GEM handle is returned. Therefore user-space which needs to handle
 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
 * reference-count duplicated GEM handles. For more information see
 * &DRM_IOCTL_GEM_CLOSE.
 *
 * The import can fail for any driver-specific reason, e.g. because import is
 * only supported for DMA-BUFs allocated on this DRM device.
 *
 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
 */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
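
/*
 * Illustrative sketch (not part of the UAPI): exporting a GEM handle as a
 * dma-buf FD and importing it again, possibly on another DRM fd. Assumes
 * open DRM fds and that the driver advertises the relevant DRM_PRIME_CAP_*
 * bits.
 *
 *	struct drm_prime_handle export_args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &export_args);
 *	// export_args.fd now holds the dma-buf file descriptor
 *
 *	struct drm_prime_handle import_args = { .fd = export_args.fd };
 *
 *	ioctl(other_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_args);
 *	// import_args.handle is a GEM handle valid on other_fd
 */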

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
 *
 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * Warning: removing a framebuffer currently in-use on an enabled plane will
 * disable that plane. The CRTC the plane is linked to may also be disabled
 * (depending on driver capabilities).
 */
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

/**
 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
 *
 * This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
 * struct as the output.
 *
 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
 * will be filled with GEM buffer handles. Fresh new GEM handles are always
 * returned, even if another GEM handle referring to the same memory object
 * already exists on the DRM file description. The caller is responsible for
 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
 * new handle will be returned for multiple planes in case they use the same
 * memory object. Planes are valid until one has a zero handle -- this can be
 * used to compute the number of planes.
 *
 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
 * until one has a zero &drm_mode_fb_cmd2.pitches.
 *
 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
 *
 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
 * double-close handles which are specified multiple times in the array.
 */
#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
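
/*
 * Illustrative sketch (not part of the UAPI) of the clean-up procedure
 * described above: query a framebuffer with DRM_IOCTL_MODE_GETFB2, export
 * each plane as a dma-buf FD, then close every unique GEM handle exactly
 * once. Assumes a DRM master (or CAP_SYS_ADMIN) fd and a valid fb_id.
 *
 *	struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
 *	int dmabuf_fd[4] = { -1, -1, -1, -1 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb);
 *	for (int i = 0; i < 4 && fb.handles[i]; i++) {
 *		struct drm_prime_handle p = { .handle = fb.handles[i] };
 *
 *		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &p);
 *		dmabuf_fd[i] = p.fd;
 *	}
 *	for (int i = 0; i < 4 && fb.handles[i]; i++) {
 *		int seen = 0;
 *
 *		for (int j = 0; j < i; j++)
 *			seen |= (fb.handles[j] == fb.handles[i]);
 *		if (!seen) {
 *			struct drm_gem_close c = { .handle = fb.handles[i] };
 *
 *			ioctl(fd, DRM_IOCTL_GEM_CLOSE, &c);
 *		}
 *	}
 */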

/*
 * Device specific ioctls should only be in their respective headers.
 * The device specific ioctl range is from 0x40 to 0x9f.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END			0xA0
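
/*
 * Illustrative sketch (not part of this header): a driver header typically
 * places its private ioctls inside this window relative to DRM_COMMAND_BASE.
 * The command number, name and argument struct below are made up.
 *
 *	#define DRM_FOO_DO_THING	0x00
 *	#define DRM_IOCTL_FOO_DO_THING \
 *		DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_DO_THING, struct drm_foo_do_thing)
 */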

/*
 * Header for events written back to userspace on the drm fd.  The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event.  A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;
	__u32 length;
};

#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
#define DRM_EVENT_CRTC_SEQUENCE	0x03

struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
	__u32 sequence;
	__u32 crtc_id; /* 0 on older kernels that do not support this */
};
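
/*
 * Illustrative sketch (not part of the UAPI): draining events from the DRM
 * fd. A single read() returns zero or more complete events; each one starts
 * with struct drm_event, whose length field is used to step to the next.
 * Assumes an open DRM fd with events pending, e.g. after a page flip or a
 * vblank event request, and <unistd.h>.
 *
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off < len;) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *		if (e->type == DRM_EVENT_VBLANK ||
 *		    e->type == DRM_EVENT_FLIP_COMPLETE) {
 *			struct drm_event_vblank *vb = (void *)e;
 *
 *			printf("crtc %u sequence %u\n", vb->crtc_id, vb->sequence);
 *		}
 *		off += e->length;
 *	}
 */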

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event	base;
	__u64			user_data;
	__s64			time_ns;
	__u64			sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#if defined(__cplusplus)
}
#endif

#endif