/* xref: /titanic_41/usr/src/uts/common/io/drm/drmP.h (revision fc7a376ec44146b66b5d6bc4ed92ba311773e202) */
/*
 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
 * Created: Mon Jan  4 10:05:05 1999 by faith@precisioninsight.com
 */
/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
40 
#ifndef	_DRMP_H
#define	_DRMP_H

/* Kernel DDI/DKI and AGP support headers, then DRM-internal headers. */
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/pci.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/pmem.h>
#include <sys/agpgart.h>
#include <sys/time.h>
#include "drm_atomic.h"
#include "drm.h"
#include "queue.h"
#include "drm_linux_list.h"
63 
/* Compatibility shims for Linux-origin code. */
#ifndef __inline__
#define	__inline__	inline
#endif

/* Map __FUNCTION__ onto C99 __func__ when available, else a placeholder. */
#if !defined(__FUNCTION__)
#if defined(C99)
#define	__FUNCTION__ __func__
#else
#define	__FUNCTION__	" "
#endif
#endif
75 
76 /* DRM space units */
77 #define	DRM_PAGE_SHIFT			PAGESHIFT
78 #define	DRM_PAGE_SIZE			(1 << DRM_PAGE_SHIFT)
79 #define	DRM_PAGE_OFFSET			(DRM_PAGE_SIZE - 1)
80 #define	DRM_PAGE_MASK			~(DRM_PAGE_SIZE - 1)
81 #define	DRM_MB2PAGES(x)			((x) << 8)
82 #define	DRM_PAGES2BYTES(x)		((x) << DRM_PAGE_SHIFT)
83 #define	DRM_BYTES2PAGES(x)		((x) >> DRM_PAGE_SHIFT)
84 #define	DRM_PAGES2KB(x)			((x) << 2)
85 #define	DRM_ALIGNED(offset)		(((offset) & DRM_PAGE_OFFSET) == 0)
86 
87 #define	PAGE_SHIFT			DRM_PAGE_SHIFT
88 #define	PAGE_SIZE			DRM_PAGE_SIZE
89 
/* Driver instance and device node limits */
#define	DRM_MAX_INSTANCES	8
#define	DRM_DEVNODE		"drm"
#define	DRM_UNOPENED		0
#define	DRM_OPENED		1

#define	DRM_HASH_SIZE		16 /* Size of key hash table */
#define	DRM_KERNEL_CONTEXT	0  /* Change drm_resctx if changed */
#define	DRM_RESERVED_CONTEXTS	1  /* Change drm_resctx if changed */

/* Memory accounting classes used by the drm_memory.c statistics */
#define	DRM_MEM_DMA	   0
#define	DRM_MEM_SAREA	   1
#define	DRM_MEM_DRIVER	   2
#define	DRM_MEM_MAGIC	   3
#define	DRM_MEM_IOCTLS	   4
#define	DRM_MEM_MAPS	   5
#define	DRM_MEM_BUFS	   6
#define	DRM_MEM_SEGS	   7
#define	DRM_MEM_PAGES	   8
#define	DRM_MEM_FILES	  9
#define	DRM_MEM_QUEUES	  10
#define	DRM_MEM_CMDS	  11
#define	DRM_MEM_MAPPINGS  12
#define	DRM_MEM_BUFLISTS  13
#define	DRM_MEM_DRMLISTS  14
#define	DRM_MEM_TOTALDRM  15
#define	DRM_MEM_BOUNDDRM  16
#define	DRM_MEM_CTXBITMAP 17
#define	DRM_MEM_STUB	  18
#define	DRM_MEM_SGLISTS	  19
#define	DRM_MEM_AGPLISTS  20
#define	DRM_MEM_CTXLIST   21
#define	DRM_MEM_MM		22
#define	DRM_MEM_HASHTAB		23
#define	DRM_MEM_OBJECTS		24

#define	DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define	DRM_MAP_HASH_OFFSET 0x10000000
#define	DRM_MAP_HASH_ORDER 12
#define	DRM_OBJECT_HASH_ORDER 12
/* presumably the mmap offset window reserved for object mappings -- verify against callers */
#define	DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define	DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define	DRM_MM_INIT_MAX_PAGES 256
132 
133 
/* Internal types and structures */
/*
 * Element count of a true array (not valid on pointer parameters).
 * The argument is parenthesized before indexing so expressions such as
 * DRM_ARRAY_SIZE(p->tbl) expand correctly.
 */
#define	DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof ((x)[0]))
/* NOTE: both arguments are evaluated twice -- no side effects, please. */
#define	DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
#define	DRM_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Pack a (major, minor) interface version; args parenthesized for safety. */
#define	DRM_IF_VERSION(maj, min) (((maj) << 16) | (min))

#define	__OS_HAS_AGP	1

/* Default device node permissions/ownership: rw for owner and group, root:root. */
#define	DRM_DEV_MOD	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define	DRM_DEV_UID	0
#define	DRM_DEV_GID	0
146 
/*
 * Locking shims: "spinlocks" are adaptive kernel mutexes on this platform.
 * DRM_LOCK/DRM_UNLOCK/DRM_LOCK_OWNED rely on a 'dev' pointer being in
 * scope at the expansion site.
 */
#define	DRM_CURRENTPID		ddi_get_pid()
#define	DRM_SPINLOCK(l)		mutex_enter(l)
#define	DRM_SPINUNLOCK(u)	mutex_exit(u)
#define	DRM_SPINLOCK_ASSERT(l)
#define	DRM_LOCK()	mutex_enter(&dev->dev_lock)
#define	DRM_UNLOCK()	mutex_exit(&dev->dev_lock)
#define	DRM_LOCK_OWNED()	ASSERT(mutex_owned(&dev->dev_lock))
#define	spin_lock_irqsave(l, flag)		mutex_enter(l)
#define	spin_unlock_irqrestore(u, flag) mutex_exit(u)
#define	spin_lock(l)	mutex_enter(l)
#define	spin_unlock(u)	mutex_exit(u)


/*
 * Argument now parenthesized: DRM_UDELAY(a + b) previously expanded to
 * a + b*1000.  NOTE(review): despite the name and the 'sec' parameter,
 * the scaling (*1000 into drv_usectohz's microseconds) implies the
 * argument is in milliseconds -- confirm against callers.
 */
#define	DRM_UDELAY(sec)  delay(drv_usectohz((sec) * 1000))
#define	DRM_MEMORYBARRIER()
162 
/* Forward typedefs so prototypes below can use the short names. */
typedef	struct drm_file		drm_file_t;
typedef struct drm_device	drm_device_t;
typedef struct drm_driver_info drm_driver_t;

/* Canonical ioctl handler parameter list; DRM_DEVICE renames dev1 to dev. */
#define	DRM_DEVICE	drm_device_t *dev = dev1
#define	DRM_IOCTL_ARGS	\
	drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
170 
/*
 * Copy between user and kernel space, returning EFAULT from the calling
 * function on failure.  Wrapped in do/while (0) so each macro behaves as
 * a single statement and composes safely with if/else (CERT PRE10-C).
 */
#define	DRM_COPYFROM_WITH_RETURN(dest, src, size)	\
	do {	\
		if (ddi_copyin((src), (dest), (size), 0)) {	\
			DRM_ERROR("%s: copy from user failed", __func__);	\
			return (EFAULT);	\
		}	\
	} while (0)

#define	DRM_COPYTO_WITH_RETURN(dest, src, size)	\
	do {	\
		if (ddi_copyout((src), (dest), (size), 0)) {	\
			DRM_ERROR("%s: copy to user failed", __func__);	\
			return (EFAULT);	\
		}	\
	} while (0)

/* Raw variants: return the ddi_copyin/ddi_copyout status to the caller. */
#define	DRM_COPY_FROM_USER(dest, src, size) \
	ddi_copyin((src), (dest), (size), 0) /* flag for src */

#define	DRM_COPY_TO_USER(dest, src, size) \
	ddi_copyout((src), (dest), (size), 0) /* flags for dest */

#define	DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)  \
	ddi_copyin((arg2), (arg1), (arg3), 0)

#define	DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)        \
	ddi_copyout((arg2), (arg1), (arg3), 0)
194 
/*
 * Register access through a mapped region (map->dev_addr); the volatile
 * casts force an actual device access on every use.
 */
#define	DRM_READ8(map, offset) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ16(map, offset) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ32(map, offset) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_WRITE8(map, offset, val) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE16(map, offset, val) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE32(map, offset, val) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
207 
/* Condition variable plus its mutex, emulating a Linux wait queue head. */
typedef struct drm_wait_queue {
	kcondvar_t	cv;
	kmutex_t	lock;
} wait_queue_head_t;
212 
/*
 * Wait queue lifecycle helpers.  Wrapped in do/while (0) so each macro is
 * a single statement (the previous bare-block form misparses in an
 * unbraced if/else).  Behavior is otherwise unchanged.
 */
#define	DRM_INIT_WAITQUEUE(q, pri)	\
	do { \
		mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, (pri)); \
		cv_init(&(q)->cv, NULL, CV_DRIVER, NULL);	\
	} while (0)

#define	DRM_FINI_WAITQUEUE(q)	\
	do { \
		mutex_destroy(&(q)->lock);	\
		cv_destroy(&(q)->cv);	\
	} while (0)

/* Wake every thread blocked on the queue. */
#define	DRM_WAKEUP(q)	\
	do { \
		mutex_enter(&(q)->lock); \
		cv_broadcast(&(q)->cv);	\
		mutex_exit(&(q)->lock);	\
	} while (0)

/* Linux jiffies == lbolt ticks on this platform. */
#define	jiffies	ddi_get_lbolt()
233 
/*
 * Wait up to 'timeout' ticks for 'condition' to become true.  Sets ret
 * to 0 on success, EBUSY on timeout, EINTR when interrupted by a signal.
 * Wrapped in do/while (0) so the multi-statement body acts as a single
 * statement (CERT PRE10-C); expansion is otherwise unchanged.
 */
#define	DRM_WAIT_ON(ret, q, timeout, condition)  			\
	do {								\
		mutex_enter(&(q)->lock);				\
		while (!(condition)) {					\
			ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock,	\
			    timeout, TR_CLOCK_TICK);			\
			if (ret == -1) {				\
				ret = EBUSY;				\
				break;					\
			} else if (ret == 0) {				\
				ret = EINTR;  				\
				break; 					\
			} else { 					\
				ret = 0; 				\
			} 						\
		} 							\
		mutex_exit(&(q)->lock);					\
	} while (0)
250 
/*
 * Single bounded wait (30 * DRM_HZ ticks) for 'condition'.  ret becomes
 * -2 when the wait timed out and the condition is still false (likely GPU
 * hang), 0 when woken or signalled.  NOTE(review): if the condition is
 * already true on entry, ret is left unassigned -- callers must
 * pre-initialize it; a signal (return 0) is also treated as success.
 * Wrapped in do/while (0) so the macro is one statement; the expansion is
 * otherwise identical to the original.
 */
#define	DRM_WAIT(ret, q, condition)  \
	do {	\
		mutex_enter(&(q)->lock);	\
		if (!(condition)) {	\
			ret = cv_timedwait_sig(&(q)->cv, &(q)->lock,	\
			    jiffies + 30 * DRM_HZ); \
			if (ret == -1) {				\
				/* gfx maybe hang */	\
				if (!(condition)) 	\
					ret = -2;	\
			} else {	\
				ret = 0;	\
			}	\
		} \
		mutex_exit(&(q)->lock);	\
	} while (0)
264 
265 
/*
 * Locate the SAREA (the _DRM_SHM map carrying the hardware lock) and
 * store it in dev_priv->sarea.  NOTE: relies on 'dev' and 'dev_priv'
 * being in scope at the expansion site; dev_lock must be held.
 */
#define	DRM_GETSAREA()  					\
{                                				\
	drm_local_map_t *map;					\
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);			\
	TAILQ_FOREACH(map, &dev->maplist, link) {		\
		if (map->type == _DRM_SHM &&			\
			map->flags & _DRM_CONTAINS_LOCK) {	\
			dev_priv->sarea = map;			\
			break;					\
		}						\
	}							\
}

/*
 * Return EINVAL from the calling ioctl when fpriv does not currently
 * hold the hardware lock.
 */
#define	LOCK_TEST_WITH_RETURN(dev, fpriv)				\
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||		\
	    dev->lock.filp != fpriv) {					\
		DRM_DEBUG("%s called without lock held", __func__);	\
		return (EINVAL);	\
	}
285 
/* Interrupt handler shims mapping Linux names onto DDI equivalents. */
#define	DRM_IRQ_ARGS	caddr_t arg
#define	IRQ_HANDLED		DDI_INTR_CLAIMED
#define	IRQ_NONE		DDI_INTR_UNCLAIMED

/* Result of AGP capability detection for a device. */
enum {
	DRM_IS_NOT_AGP,
	DRM_IS_AGP,
	DRM_MIGHT_BE_AGP
};

/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#ifndef PCIY_AGP
#define	PCIY_AGP		0x02
#endif

#ifndef PCIY_EXPRESS
#define	PCIY_EXPRESS		0x10
#endif
304 
/* Round 'addr' up to the next page boundary. */
#define	PAGE_ALIGN(addr)	(((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
/* "Superuser": cred's saved gid or saved uid is 0 (crgetsgid/crgetsuid). */
#define	DRM_SUSER(p)		(crgetsgid(p) == 0 || crgetsuid(p) == 0)

#define	DRM_GEM_OBJIDR_HASHNODE	1024
/*
 * Iterate every entry in all buckets of an idr hash table.  C99 for-loop
 * declaration; 'head' is expanded twice, so pass a side-effect-free lvalue.
 */
#define	idr_list_for_each(entry, head) \
	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
		list_for_each(entry, &(head)->next[key])

/*
 * wait for 400 milliseconds
 */
#define	DRM_HZ			drv_usectohz(400000)
317 
318 typedef unsigned long dma_addr_t;
319 typedef uint64_t	u64;
320 typedef uint32_t	u32;
321 typedef uint16_t	u16;
322 typedef uint8_t		u8;
323 typedef uint_t		irqreturn_t;
324 
325 #define	DRM_SUPPORT	1
326 #define	DRM_UNSUPPORT	0
327 
328 #define	__OS_HAS_AGP	1
329 
/* One supported PCI (vendor, device) pair in a driver's match table. */
typedef struct drm_pci_id_list
{
	int vendor;
	int device;
	long driver_private;
	char *name;
} drm_pci_id_list_t;

/* Flags restricting who may invoke an ioctl. */
#define	DRM_AUTH	0x1
#define	DRM_MASTER	0x2
#define	DRM_ROOT_ONLY	0x4
typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
/* Dispatch table entry: handler plus its DRM_AUTH/MASTER/ROOT_ONLY flags. */
typedef struct drm_ioctl_desc {
	int	(*func)(DRM_IOCTL_ARGS);
	int	flags;
} drm_ioctl_desc_t;
346 
/* Singly-linked association between an auth magic and its drm_file. */
typedef struct drm_magic_entry {
	drm_magic_t		magic;
	struct drm_file		*priv;
	struct drm_magic_entry	*next;
} drm_magic_entry_t;

/* Head/tail of one bucket in the per-device magic hash. */
typedef struct drm_magic_head {
	struct drm_magic_entry *head;
	struct drm_magic_entry *tail;
} drm_magic_head_t;
357 
/* One DMA buffer managed by the legacy buffer subsystem. */
typedef struct drm_buf {
	int		idx;		/* Index into master buflist */
	int		total;		/* Buffer size */
	int		order;		/* log-base-2(total) */
	int		used;		/* Amount of buffer in use (for DMA) */
	unsigned long	offset;		/* Byte offset (used internally) */
	void		*address;	/* Address of buffer */
	unsigned long	bus_address;	/* Bus address of buffer */
	struct drm_buf	*next;		/* Kernel-only: used for free list */
	volatile int	pending;	/* On hardware DMA queue */
	drm_file_t		*filp;
				/* Unique identifier of holding process */
	int		context;	/* Kernel queue for this buffer */
	enum {
		DRM_LIST_NONE	 = 0,
		DRM_LIST_FREE	 = 1,
		DRM_LIST_WAIT	 = 2,
		DRM_LIST_PEND	 = 3,
		DRM_LIST_PRIO	 = 4,
		DRM_LIST_RECLAIM = 5
	}		list;		/* Which list we're on */

	int		dev_priv_size;	/* Size of buffer private storage */
	void		*dev_private;	/* Per-buffer private storage */
} drm_buf_t;
383 
/* Free list of DMA buffers with low/high watermarks. */
typedef struct drm_freelist {
	int		  initialized;	/* Freelist in use		*/
	uint32_t	  count;	/* Number of free buffers	*/
	drm_buf_t	  *next;	/* End pointer			*/

	int		  low_mark;	/* Low water mark		*/
	int		  high_mark;	/* High water mark		*/
} drm_freelist_t;

/* All buffers of one size order, plus their backing segments. */
typedef struct drm_buf_entry {
	int		  buf_size;
	int		  buf_count;
	drm_buf_t	  *buflist;
	int		  seg_count;
	int		  page_order;

	uint32_t	  *seglist;
	unsigned long	  *seglist_bus;

	drm_freelist_t	  freelist;
} drm_buf_entry_t;
405 
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;

/* BEGIN CSTYLED */
/* A kernel mapping of device registers, SAREA, AGP, or SG memory. */
typedef struct drm_local_map {
	unsigned long	offset;  /*  Physical address (0 for SAREA)	*/
	unsigned long	size;	 /* Physical size (bytes)		*/
	drm_map_type_t	type;	 /* Type of memory mapped		*/
	drm_map_flags_t flags;	 /* Flags				*/
	void		*handle; /* User-space: "Handle" to pass to mmap */
				 /* Kernel-space: kernel-virtual address */
	int		mtrr;	 /* Boolean: MTRR used 			*/
				 /* Private data			*/
	int		rid;	 /* PCI resource ID for bus_space 	*/
	int		kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
	caddr_t		dev_addr;	  /* base device address 	*/
	ddi_acc_handle_t  dev_handle;	  /* The data access handle 	*/
	ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free  */
	TAILQ_ENTRY(drm_local_map) link;
} drm_local_map_t;
/* END CSTYLED */
426 
/*
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 */
struct drm_gem_object {
	/* Reference count of this object */
	atomic_t refcount;

	/* Handle count of this object. Each handle also holds a reference */
	atomic_t handlecount;

	/* Related drm device */
	struct drm_device *dev;

	/* set when the object has been flink'ed (globally named) */
	int flink;
	/*
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/*
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int name;

	/*
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/*
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	void *driver_private;

	/* Backing storage bookkeeping (DDI DMA/access handles and pages). */
	drm_local_map_t *map;
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;
	caddr_t kaddr;
	size_t real_size;	/* real size of memory */
	pfn_t *pfnarray;
};

/* Node in a doubly-linked idr bucket mapping a handle to a GEM object. */
struct idr_list {
	struct idr_list *next, *prev;
	struct drm_gem_object *obj;
	uint32_t	handle;
	caddr_t	contain_ptr;
};
488 
/* Per-open-file state for a DRM client. */
struct drm_file {
	TAILQ_ENTRY(drm_file) link;
	int		  authenticated;	/* passed magic authentication */
	int		  master;
	int		  minor;
	pid_t		  pid;
	uid_t		  uid;
	int		  refs;
	drm_magic_t	  magic;
	unsigned long	  ioctl_count;
	void		 *driver_priv;
	/* Mapping of mm object handles to object pointers. */
	struct idr_list object_idr;
	/* Lock for synchronization of access to object_idr. */
	kmutex_t table_lock;

	dev_t dev;
	cred_t *credp;
};
508 
/* State of the per-device hardware lock. */
typedef struct drm_lock_data {
	drm_hw_lock_t	*hw_lock;	/* Hardware lock		*/
	drm_file_t	*filp;
	/* Unique identifier of holding process */
	kcondvar_t	lock_cv;	/* lock queue - SOLARIS Specific */
	kmutex_t	lock_mutex;	/* lock - SOLARIS Specific */
	unsigned long	lock_time;	/* Time of last lock in clock ticks */
} drm_lock_data_t;
517 
/*
 * This structure, in drm_device_t, is always initialized while the device
 * is open.  dev->dma_lock protects the incrementing of dev->buf_use, which
 * when set marks that no further bufs may be allocated until device teardown
 * occurs (when the last open of the device has closed).  The high/low
 * watermarks of bufs are only touched by the X Server, and thus not
 * concurrently accessed, so no locking is needed.
 */
typedef struct drm_device_dma {
	drm_buf_entry_t	bufs[DRM_MAX_ORDER+1];	/* one entry per size order */
	int		buf_count;
	drm_buf_t	**buflist;	/* Vector of pointers info bufs	   */
	int		seg_count;
	int		page_count;
	unsigned long	*pagelist;
	unsigned long	byte_count;
	enum {
		_DRM_DMA_USE_AGP = 0x01,
		_DRM_DMA_USE_SG  = 0x02
	} flags;
} drm_device_dma_t;

/* One AGP allocation, kept on a doubly-linked per-device list. */
typedef struct drm_agp_mem {
	void		*handle;
	unsigned long	bound; /* address */
	int		pages;
	caddr_t		phys_addr;
	struct drm_agp_mem *prev;
	struct drm_agp_mem *next;
} drm_agp_mem_t;
548 
/* Per-device AGP state, backed by the agpgart LDI device. */
typedef struct drm_agp_head {
	agp_info_t	agp_info;
	const char	*chipset;
	drm_agp_mem_t	*memory;	/* list of AGP allocations */
	unsigned long	mode;
	int		enabled;
	int		acquired;
	unsigned long	base;
	int		mtrr;
	int		cant_use_aperture;
	unsigned long	page_mask;
	ldi_ident_t	agpgart_li;	/* LDI ident/handle for agpgart(7D) */
	ldi_handle_t	agpgart_lh;
} drm_agp_head_t;
563 
564 
/* Bundle of DDI handles describing one DMA-able memory allocation. */
typedef struct drm_dma_handle {
	ddi_dma_handle_t	dma_hdl;
	ddi_acc_handle_t	acc_hdl;
	ddi_dma_cookie_t	cookie;
	uint_t		cookie_num;
	uintptr_t		vaddr;   /* virtual addr */
	uintptr_t		paddr;   /* physical addr */
	size_t		real_sz; /* real size of memory */
} drm_dma_handle_t;

/* Scatter/gather memory registered with the device. */
typedef struct drm_sg_mem {
	unsigned long	handle;
	void		*virtual;
	int		pages;
	dma_addr_t  	*busaddr;
	ddi_umem_cookie_t	*umem_cookie;
	drm_dma_handle_t	*dmah_sg;
	drm_dma_handle_t	*dmah_gart; /* Handle to PCI memory */
} drm_sg_mem_t;
584 
/*
 * Generic memory manager structs
 */

/* One extent in the drm_mm range allocator (free or allocated). */
struct drm_mm_node {
	struct list_head fl_entry;	/* free-list linkage */
	struct list_head ml_entry;	/* memory-list linkage */
	int free;
	unsigned long start;
	unsigned long size;
	struct drm_mm *mm;
	void *private;
};

/* Range allocator: heads of the free and memory lists. */
struct drm_mm {
	struct list_head fl_entry;
	struct list_head ml_entry;
};

typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;

typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
/* A signal to deliver to a process when a vblank sequence is reached. */
typedef struct drm_vbl_sig {
	TAILQ_ENTRY(drm_vbl_sig) link;
	unsigned int	sequence;
	int		signo;
	int		pid;
} drm_vbl_sig_t;
613 
614 
/* used for clone device */
typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
/* Associates a clone minor number with its drm_file. */
typedef struct drm_cminor {
	TAILQ_ENTRY(drm_cminor) link;
	drm_file_t		*fpriv;
	int			minor;
} drm_cminor_t;

/* location of GART table */
#define	DRM_ATI_GART_MAIN	1	/* in main (system) memory */
#define	DRM_ATI_GART_FB		2	/* in frame buffer memory */

/* ATI PCI GART table description. */
typedef struct ati_pcigart_info {
	int gart_table_location;
	int is_pcie;
	void *addr;
	dma_addr_t bus_addr;
	drm_local_map_t mapping;
} drm_ati_pcigart_info;
634 
/* DRM device structure */
struct drm_device;
/*
 * Per-driver entry points and feature flags.  The generic DRM layer calls
 * through these hooks; any hook a driver does not need may be NULL
 * (NOTE(review): NULL-tolerance of individual hooks should be confirmed
 * against the dispatch code in drm_drv.c / drm_irq.c).
 */
struct drm_driver_info {
	/* attach/detach lifecycle */
	int (*load)(struct drm_device *, unsigned long);
	int (*firstopen)(struct drm_device *);
	int (*open)(struct drm_device *, drm_file_t *);
	void (*preclose)(struct drm_device *, drm_file_t *);
	void (*postclose)(struct drm_device *, drm_file_t *);
	void (*lastclose)(struct drm_device *);
	int (*unload)(struct drm_device *);
	void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	int (*open_helper)(struct drm_device *, drm_file_t *);
	void (*free_filp_priv)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, void *);
	/* DMA pipeline control */
	int (*dma_ioctl)(DRM_IOCTL_ARGS);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *,
			int, drm_lock_flags_t);
	int (*dma_flush_unblock)(struct drm_device *, int,
					drm_lock_flags_t);
	/* hardware context management */
	int (*context_ctor)(struct drm_device *, int);
	int (*context_dtor)(struct drm_device *, int);
	int (*kernel_context_switch)(struct drm_device *, int, int);
	int (*kernel_context_switch_unlock)(struct drm_device *);
	int (*device_is_agp) (struct drm_device *);
	/* interrupt handling */
	int (*irq_preinstall)(struct drm_device *);
	void (*irq_postinstall)(struct drm_device *);
	void (*irq_uninstall)(struct drm_device *dev);
	uint_t (*irq_handler)(DRM_IRQ_ARGS);
	int (*vblank_wait)(struct drm_device *, unsigned int *);
	int (*vblank_wait2)(struct drm_device *, unsigned int *);
	/* added for intel minimized vblank */
	u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
	int (*enable_vblank)(struct drm_device *dev, int crtc);
	void (*disable_vblank)(struct drm_device *dev, int crtc);

	/*
	 * Driver-specific constructor for drm_gem_objects, to set up
	 * obj->driver_private.
	 *
	 * Returns 0 on success.
	 */
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);


	/* driver-private ioctl table appended to the generic one */
	drm_ioctl_desc_t *driver_ioctls;
	int	max_driver_ioctl;

	int	buf_priv_size;
	int	driver_major;
	int	driver_minor;
	int	driver_patchlevel;
	const char *driver_name;	/* Simple driver name		   */
	const char *driver_desc;	/* Longer driver name		   */
	const char *driver_date;	/* Date of last major changes.	   */

	/* feature flags consumed by the generic layer */
	unsigned use_agp :1;
	unsigned require_agp :1;
	unsigned use_sg :1;
	unsigned use_dma :1;
	unsigned use_pci_dma :1;
	unsigned use_dma_queue :1;
	unsigned use_irq :1;
	unsigned use_vbl_irq :1;
	unsigned use_vbl_irq2 :1;
	unsigned use_mtrr :1;
	unsigned use_gem;
};
707 
/*
 * hardware-specific code needs to initialize mutexes which
 * can be used in interrupt context, so they need to know
 * the interrupt priority. Interrupt cookie in drm_device
 * structure is the intr_block field.
 */
#define	DRM_INTR_PRI(dev) \
	DDI_INTR_PRI((dev)->intr_block)
716 
/* Per-instance soft state for one DRM device. */
struct drm_device {
	drm_driver_t	*driver;	/* driver entry points/flags */
	drm_cminor_list_t	minordevs;	/* clone minors */
	dev_info_t *dip;
	void	*drm_handle;
	int drm_supported;
	const char *desc; /* current driver description */
	kmutex_t *irq_mutex;
	kcondvar_t *irq_cv;

	ddi_iblock_cookie_t intr_block;	/* see DRM_INTR_PRI() */
	uint32_t	pci_device;	/* PCI device id */
	uint32_t	pci_vendor;
	char		*unique;	/* Unique identifier: e.g., busid  */
	int		unique_len;	/* Length of unique field	   */
	int		if_version;	/* Highest interface version set */
	int		flags;	/* Flags to open(2)		   */

	/* Locks */
	kmutex_t	vbl_lock;	/* protects vblank operations */
	kmutex_t	dma_lock;	/* protects dev->dma */
	kmutex_t	irq_lock;	/* protects irq condition checks */
	kmutex_t	dev_lock;	/* protects everything else */
	drm_lock_data_t   lock;		/* Information on hardware lock    */
	kmutex_t	struct_mutex;	/* < For others	*/

	/* Usage Counters */
	int		  open_count;	/* Outstanding files open	   */
	int		  buf_use;	/* Buffers in use -- cannot alloc  */

	/* Performance counters */
	unsigned long	  counters;
	drm_stat_type_t	  types[15];
	uint32_t	  counts[15];

	/* Authentication */
	drm_file_list_t   files;
	drm_magic_head_t  magiclist[DRM_HASH_SIZE];

	/* Linked list of mappable regions. Protected by dev_lock */
	drm_map_list_t	  maplist;

	drm_local_map_t	  **context_sareas;
	int		  max_context;

	/* DMA queues (contexts) */
	drm_device_dma_t  *dma;		/* Optional pointer for DMA support */

	/* Context support */
	int		  irq;		/* Interrupt used by board	   */
	int		  irq_enabled;	/* True if the irq handler is enabled */
	int		  pci_domain;
	int		  pci_bus;
	int		  pci_slot;
	int		  pci_func;
	atomic_t	  context_flag;	/* Context swapping flag	   */
	int		  last_context;	/* Last current context		   */

	/* Only used for Radeon */
	atomic_t	vbl_received;
	atomic_t	vbl_received2;

	drm_vbl_sig_list_t vbl_sig_list;
	drm_vbl_sig_list_t vbl_sig_list2;
	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;

	wait_queue_head_t	vbl_queue;	/* vbl wait channel */
	/* vbl wait channel array */
	wait_queue_head_t	*vbl_queues;

	/* number of VBLANK interrupts */
	/* (driver must alloc the right number of counters) */
	atomic_t	  *_vblank_count;
	/* signal list to send on VBLANK */
	struct drm_vbl_sig_list *vbl_sigs;

	/* number of signals pending on all crtcs */
	atomic_t	  vbl_signal_pending;
	/* number of users of vblank interrupts per crtc */
	atomic_t	  *vblank_refcount;
	/* protected by dev->vbl_lock, used for wraparound handling */
	u32		  *last_vblank;
	/* so we don't call enable more than */
	atomic_t	  *vblank_enabled;
	/* Display driver is setting mode */
	int		*vblank_inmodeset;
	/* Don't wait while crtc is likely disabled */
	int		*vblank_suspend;
	/* size of vblank counter register */
	u32		max_vblank_count;
	int		num_crtcs;
	kmutex_t	tasklet_lock;
	void (*locked_tasklet_func)(struct drm_device *dev);

	pid_t		  buf_pgid;
	drm_agp_head_t    *agp;
	drm_sg_mem_t	  *sg;  /* Scatter gather memory */
	uint32_t	  *ctx_bitmap;
	void		  *dev_private;
	unsigned int	  agp_buffer_token;
	drm_local_map_t   *agp_buffer_map;

	kstat_t		  *asoft_ksp; /* kstat support */

	/* name Drawable information */
	kmutex_t	drw_lock;
	unsigned int drw_bitfield_length;
	u32 *drw_bitfield;
	unsigned int drw_info_length;
	drm_drawable_info_t **drw_info;

	/* \name GEM information */
	/* @{ */
	kmutex_t object_name_lock;
	struct idr_list	object_name_idr;
	atomic_t object_count;
	atomic_t object_memory;
	atomic_t pin_count;
	atomic_t pin_memory;
	atomic_t gtt_count;
	atomic_t gtt_memory;
	uint32_t gtt_total;
	uint32_t invalidate_domains;	/* domains pending invalidation */
	uint32_t flush_domains;	/* domains pending flush */
	/* @} */

	/*
	 * Saving S3 context
	 */
	void		  *s3_private;
};
854 
855 /* Memory management support (drm_memory.c) */
856 void	drm_mem_init(void);
857 void	drm_mem_uninit(void);
858 void	*drm_alloc(size_t, int);
859 void	*drm_calloc(size_t, size_t, int);
860 void	*drm_realloc(void *, size_t, size_t, int);
861 void	drm_free(void *, size_t, int);
862 int 	drm_ioremap(drm_device_t *, drm_local_map_t *);
863 void	drm_ioremapfree(drm_local_map_t *);
864 
865 void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
866 void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
867 
868 void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
869 void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
870 
871 struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
872 
873 int	drm_context_switch(drm_device_t *, int, int);
874 int	drm_context_switch_complete(drm_device_t *, int);
875 int	drm_ctxbitmap_init(drm_device_t *);
876 void	drm_ctxbitmap_cleanup(drm_device_t *);
877 void	drm_ctxbitmap_free(drm_device_t *, int);
878 int	drm_ctxbitmap_next(drm_device_t *);
879 
880 /* Locking IOCTL support (drm_lock.c) */
881 int	drm_lock_take(drm_lock_data_t *, unsigned int);
882 int	drm_lock_transfer(drm_device_t *,
883 			drm_lock_data_t *, unsigned int);
884 int	drm_lock_free(drm_device_t *,
885 		    volatile unsigned int *, unsigned int);
886 
887 /* Buffer management support (drm_bufs.c) */
888 unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
889 unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
890 int	drm_initmap(drm_device_t *, unsigned long, unsigned long,
891     unsigned int, int, int);
892 void	drm_rmmap(drm_device_t *, drm_local_map_t *);
893 int	drm_addmap(drm_device_t *, unsigned long, unsigned long,
894     drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
895 int	drm_order(unsigned long);
896 
897 /* DMA support (drm_dma.c) */
898 int	drm_dma_setup(drm_device_t *);
899 void	drm_dma_takedown(drm_device_t *);
900 void	drm_free_buffer(drm_device_t *, drm_buf_t *);
901 void	drm_reclaim_buffers(drm_device_t *, drm_file_t *);
902 #define	drm_core_reclaim_buffers	drm_reclaim_buffers
903 
904 /* IRQ support (drm_irq.c) */
905 int	drm_irq_install(drm_device_t *);
906 int	drm_irq_uninstall(drm_device_t *);
907 uint_t	drm_irq_handler(DRM_IRQ_ARGS);
908 void	drm_driver_irq_preinstall(drm_device_t *);
909 void	drm_driver_irq_postinstall(drm_device_t *);
910 void	drm_driver_irq_uninstall(drm_device_t *);
911 int	drm_vblank_wait(drm_device_t *, unsigned int *);
912 void	drm_vbl_send_signals(drm_device_t *);
913 void    drm_handle_vblank(struct drm_device *dev, int crtc);
914 u32	drm_vblank_count(struct drm_device *dev, int crtc);
915 int	drm_vblank_get(struct drm_device *dev, int crtc);
916 void	drm_vblank_put(struct drm_device *dev, int crtc);
917 int	drm_vblank_init(struct drm_device *dev, int num_crtcs);
918 void	drm_vblank_cleanup(struct drm_device *dev);
919 int    drm_modeset_ctl(DRM_IOCTL_ARGS);
920 
921 /* AGP/GART support (drm_agpsupport.c) */
/*
 * NOTE(review): drm_device_t and struct drm_device are used interchangeably
 * below; presumably the same type via typedef declared earlier in this
 * header -- confirm.
 */
922 int	drm_device_is_agp(drm_device_t *);
923 int 	drm_device_is_pcie(drm_device_t *);
924 drm_agp_head_t *drm_agp_init(drm_device_t *);
925 void	drm_agp_fini(drm_device_t *);
926 int 	drm_agp_do_release(drm_device_t *);
927 void	*drm_agp_allocate_memory(size_t pages,
928 	    uint32_t type, drm_device_t *dev);
929 int	drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
930 int	drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
931 int	drm_agp_unbind_memory(unsigned long, drm_device_t *);
/*
 * Bind/unbind an array of page-frame numbers at gtt_offset.
 * NOTE(review): semantics inferred from parameter names -- confirm in
 * drm_agpsupport.c.
 */
932 int	drm_agp_bind_pages(drm_device_t *dev,
933 		    pfn_t *pages,
934 		    unsigned long num_pages,
935 		    uint32_t gtt_offset);
936 int	drm_agp_unbind_pages(drm_device_t *dev,
937 		    unsigned long num_pages,
938 		    uint32_t gtt_offset,
939 		    uint32_t type);
940 void drm_agp_chipset_flush(struct drm_device *dev);
941 void drm_agp_rebind(struct drm_device *dev);
942 
943 /* kstat support (drm_kstats.c) */
944 int	drm_init_kstats(drm_device_t *);
945 void	drm_fini_kstats(drm_device_t *);
946 
947 /* Scatter Gather Support (drm_scatter.c) */
948 void	drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
949 
950 /* ATI PCIGART support (ati_pcigart.c) */
951 int	drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
952 int	drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
953 
954 /* Locking IOCTL support (drm_drv.c) */
955 int	drm_lock(DRM_IOCTL_ARGS);
956 int	drm_unlock(DRM_IOCTL_ARGS);
957 int	drm_version(DRM_IOCTL_ARGS);
958 int	drm_setversion(DRM_IOCTL_ARGS);
959 /* Cache management (drm_cache.c) */
960 void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
961 
/*
 * ioctl handler prototypes.  DRM_IOCTL_ARGS is a macro defined outside
 * this chunk; every handler below shares that common parameter list.
 */
962 /* Misc. IOCTL support (drm_ioctl.c) */
963 int	drm_irq_by_busid(DRM_IOCTL_ARGS);
964 int	drm_getunique(DRM_IOCTL_ARGS);
965 int	drm_setunique(DRM_IOCTL_ARGS);
966 int	drm_getmap(DRM_IOCTL_ARGS);
967 int	drm_getclient(DRM_IOCTL_ARGS);
968 int	drm_getstats(DRM_IOCTL_ARGS);
969 int	drm_noop(DRM_IOCTL_ARGS);
970 
971 /* Context IOCTL support (drm_context.c) */
972 int	drm_resctx(DRM_IOCTL_ARGS);
973 int	drm_addctx(DRM_IOCTL_ARGS);
974 int	drm_modctx(DRM_IOCTL_ARGS);
975 int	drm_getctx(DRM_IOCTL_ARGS);
976 int	drm_switchctx(DRM_IOCTL_ARGS);
977 int	drm_newctx(DRM_IOCTL_ARGS);
978 int	drm_rmctx(DRM_IOCTL_ARGS);
979 int	drm_setsareactx(DRM_IOCTL_ARGS);
980 int	drm_getsareactx(DRM_IOCTL_ARGS);
981 
982 /* Drawable IOCTL support (drm_drawable.c) */
983 int	drm_adddraw(DRM_IOCTL_ARGS);
984 int	drm_rmdraw(DRM_IOCTL_ARGS);
985 int	drm_update_draw(DRM_IOCTL_ARGS);
986 
987 /* Authentication IOCTL support (drm_auth.c) */
988 int	drm_getmagic(DRM_IOCTL_ARGS);
989 int	drm_authmagic(DRM_IOCTL_ARGS);
/* Non-ioctl helpers for magic-cookie authentication state. */
990 int	drm_remove_magic(drm_device_t *, drm_magic_t);
991 drm_file_t	*drm_find_file(drm_device_t *, drm_magic_t);
992 /* Buffer management support (drm_bufs.c) */
993 int	drm_addmap_ioctl(DRM_IOCTL_ARGS);
994 int	drm_rmmap_ioctl(DRM_IOCTL_ARGS);
995 int	drm_addbufs_ioctl(DRM_IOCTL_ARGS);
996 int	drm_infobufs(DRM_IOCTL_ARGS);
997 int	drm_markbufs(DRM_IOCTL_ARGS);
998 int	drm_freebufs(DRM_IOCTL_ARGS);
999 int	drm_mapbufs(DRM_IOCTL_ARGS);
1000 
1001 /* DMA support (drm_dma.c) */
1002 int	drm_dma(DRM_IOCTL_ARGS);
1003 
1004 /* IRQ support (drm_irq.c) */
1005 int	drm_control(DRM_IOCTL_ARGS);
1006 int	drm_wait_vblank(DRM_IOCTL_ARGS);
1007 
1008 /* AGP/GART support (drm_agpsupport.c) */
1009 int	drm_agp_acquire(DRM_IOCTL_ARGS);
1010 int	drm_agp_release(DRM_IOCTL_ARGS);
1011 int	drm_agp_enable(DRM_IOCTL_ARGS);
1012 int	drm_agp_info(DRM_IOCTL_ARGS);
1013 int	drm_agp_alloc(DRM_IOCTL_ARGS);
1014 int	drm_agp_free(DRM_IOCTL_ARGS);
1015 int	drm_agp_unbind(DRM_IOCTL_ARGS);
1016 int	drm_agp_bind(DRM_IOCTL_ARGS);
1017 
1018 /* Scatter Gather Support (drm_scatter.c) */
1019 int	drm_sg_alloc(DRM_IOCTL_ARGS);
1020 int	drm_sg_free(DRM_IOCTL_ARGS);
1021 
1022 /*	drm_mm.c	*/
/* Memory-range manager allocator entry points (drm_mm.c). */
1023 struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
1024 				    unsigned long size, unsigned alignment);
1025 struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
1026 				    unsigned long size,
1027 				    unsigned alignment, int best_match);
1028 
1029 extern void drm_mm_clean_ml(const struct drm_mm *mm);
/* Global verbosity level consulted by the DRM_DEBUG/DRM_INFO macros below. */
1030 extern int drm_debug_flag;
1031 
1032 /* printf-style logging backends for the DRM_DEBUG, DRM_ERROR and DRM_INFO macros below. */
1033 extern void drm_debug(const char *fmt, ...);
1034 extern void drm_error(const char *fmt, ...);
1035 extern void drm_info(const char *fmt, ...);
1036 
#ifdef DEBUG
/*
 * Debug/info logging, gated on the global drm_debug_flag declared above.
 * Implemented as variadic function-like macros wrapped in do { } while (0)
 * so each invocation expands to exactly one statement.  The previous
 * object-like form,
 *	#define DRM_DEBUG	if (drm_debug_flag >= 2) drm_debug
 * left a bare dangling `if', so code such as
 *	if (cond) DRM_DEBUG("..."); else cleanup();
 * silently bound the `else' to the macro's hidden `if'.  Callers already
 * use function-call syntax (the non-DEBUG branch has always been a
 * variadic macro), so this change is source-compatible.
 */
#define	DRM_DEBUG(...)						\
	do {							\
		if (drm_debug_flag >= 2)			\
			drm_debug(__VA_ARGS__);			\
	} while (0)
#define	DRM_INFO(...)						\
	do {							\
		if (drm_debug_flag >= 1)			\
			drm_info(__VA_ARGS__);			\
	} while (0)
#else
/* Non-DEBUG build: logging calls compile away entirely. */
#define	DRM_DEBUG(...)
#define	DRM_INFO(...)
#endif

/* Errors are always reported, independent of drm_debug_flag. */
#define	DRM_ERROR		drm_error
1046 
1047 
/* NOTE(review): upper bound on driver instances -- meaning inferred from the name; confirm against users. */
1048 #define	MAX_INSTNUMS 16
1049 
1050 extern int drm_dev_to_instance(dev_t);
1051 extern int drm_dev_to_minor(dev_t);
1052 extern void *drm_supp_register(dev_info_t *, drm_device_t *);
1053 extern int drm_supp_unregister(void *);
1054 
/* NOTE(review): the meaning of the plain int parameters of drm_open/drm_close (flag/otyp style?) is not derivable from this chunk -- check call sites. */
1055 extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
1056 extern int drm_close(drm_device_t *, int, int, int, cred_t *);
1057 extern int drm_attach(drm_device_t *);
1058 extern int drm_detach(drm_device_t *);
1059 extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
1060 
1061 extern int drm_pci_init(drm_device_t *);
1062 extern void drm_pci_end(drm_device_t *);
1063 extern int pci_get_info(drm_device_t *, int *, int *, int *);
1064 extern int pci_get_irq(drm_device_t *);
1065 extern int pci_get_vendor(drm_device_t *);
1066 extern int pci_get_device(drm_device_t *);
1067 
1068 extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
1069 							drm_drawable_t);
1070 /* File Operations helpers (drm_fops.c) */
1071 extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
1072 extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
1073 extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
1074     cred_t *);
1075 
1076 /* Graphics Execution Manager library functions (drm_gem.c) */
1077 int drm_gem_init(struct drm_device *dev);
1078 void drm_gem_object_free(struct drm_gem_object *obj);
1079 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1080 					    size_t size);
1081 void drm_gem_object_handle_free(struct drm_gem_object *obj);
1082 
/* Paired acquire/release, per the *_reference / *_unreference naming. */
1083 void drm_gem_object_reference(struct drm_gem_object *obj);
1084 void drm_gem_object_unreference(struct drm_gem_object *obj);
1085 
/* Handle <-> object mapping; the new handle is returned through *handlep. */
1086 int drm_gem_handle_create(struct drm_file *file_priv,
1087 			    struct drm_gem_object *obj,
1088 			    int *handlep);
1089 void drm_gem_object_handle_reference(struct drm_gem_object *obj);
1090 
1091 void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
1092 
1093 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
1094 					    int handle);
/* GEM ioctls plus drm_gem_open/drm_gem_release, which take a drm_file -- presumably per-open-file setup/teardown (confirm in drm_gem.c). */
1095 int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
1096 int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
1097 int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
1098 void drm_gem_open(struct drm_file *file_private);
1099 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1100 
1101 
1102 #endif	/* _DRMP_H */
1103