/*
 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
 * Created: Mon Jan  4 10:05:05 1999 by faith@precisioninsight.com
 */
/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_DRMP_H
#define	_DRMP_H

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/pci.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/pmem.h>
#include <sys/agpgart.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include "drm_atomic.h"
#include "drm.h"
#include "queue.h"
#include "drm_linux_list.h"

#ifndef __inline__
#define	__inline__	inline
#endif

#if !defined(__FUNCTION__)
#if defined(C99)
#define	__FUNCTION__ __func__
#else
#define	__FUNCTION__	" "
#endif
#endif

/* DRM space units */
#define	DRM_PAGE_SHIFT			PAGESHIFT
#define	DRM_PAGE_SIZE			(1 << DRM_PAGE_SHIFT)
#define	DRM_PAGE_OFFSET			(DRM_PAGE_SIZE - 1)
#define	DRM_PAGE_MASK			~(DRM_PAGE_SIZE - 1)
#define	DRM_MB2PAGES(x)			((x) << 8)
#define	DRM_PAGES2BYTES(x)		((x) << DRM_PAGE_SHIFT)
#define	DRM_BYTES2PAGES(x)		((x) >> DRM_PAGE_SHIFT)
#define	DRM_PAGES2KB(x)			((x) << 2)
#define	DRM_ALIGNED(offset)		(((offset) & DRM_PAGE_OFFSET) == 0)

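/*
 * Illustrative arithmetic for the unit macros above (assuming the usual
 * 4KB page, i.e. PAGESHIFT == 12): DRM_MB2PAGES(16) == 16 << 8 == 4096
 * pages, DRM_PAGES2BYTES(4096) == 4096 << 12 == 16MB, and
 * DRM_PAGES2KB(1) == 4.  DRM_ALIGNED() simply tests that an offset falls
 * on a page boundary.
 */
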
#define	PAGE_SHIFT			DRM_PAGE_SHIFT
#define	PAGE_SIZE			DRM_PAGE_SIZE

#define	DRM_MAX_INSTANCES	8
#define	DRM_DEVNODE		"drm"
#define	DRM_UNOPENED		0
#define	DRM_OPENED		1

#define	DRM_HASH_SIZE		16 /* Size of key hash table */
#define	DRM_KERNEL_CONTEXT	0  /* Change drm_resctx if changed */
#define	DRM_RESERVED_CONTEXTS	1  /* Change drm_resctx if changed */

#define	DRM_MEM_DMA	   0
#define	DRM_MEM_SAREA	   1
#define	DRM_MEM_DRIVER	   2
#define	DRM_MEM_MAGIC	   3
#define	DRM_MEM_IOCTLS	   4
#define	DRM_MEM_MAPS	   5
#define	DRM_MEM_BUFS	   6
#define	DRM_MEM_SEGS	   7
#define	DRM_MEM_PAGES	   8
#define	DRM_MEM_FILES	  9
#define	DRM_MEM_QUEUES	  10
#define	DRM_MEM_CMDS	  11
#define	DRM_MEM_MAPPINGS  12
#define	DRM_MEM_BUFLISTS  13
#define	DRM_MEM_DRMLISTS  14
#define	DRM_MEM_TOTALDRM  15
#define	DRM_MEM_BOUNDDRM  16
#define	DRM_MEM_CTXBITMAP 17
#define	DRM_MEM_STUB	  18
#define	DRM_MEM_SGLISTS	  19
#define	DRM_MEM_AGPLISTS  20
#define	DRM_MEM_CTXLIST   21
#define	DRM_MEM_MM		22
#define	DRM_MEM_HASHTAB		23
#define	DRM_MEM_OBJECTS		24

#define	DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define	DRM_MAP_HASH_OFFSET 0x10000000
#define	DRM_MAP_HASH_ORDER 12
#define	DRM_OBJECT_HASH_ORDER 12
#define	DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define	DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define	DRM_MM_INIT_MAX_PAGES 256


/* Internal types and structures */
#define	DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
#define	DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
#define	DRM_MAX(a, b) ((a) > (b) ? (a) : (b))

#define	DRM_IF_VERSION(maj, min) ((maj) << 16 | (min))

#define	__OS_HAS_AGP	1

#define	DRM_DEV_MOD	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define	DRM_DEV_UID	0
#define	DRM_DEV_GID	0

#define	DRM_CURRENTPID		ddi_get_pid()
#define	DRM_SPINLOCK(l)		mutex_enter(l)
#define	DRM_SPINUNLOCK(u)	mutex_exit(u)
#define	DRM_SPINLOCK_ASSERT(l)
#define	DRM_LOCK()	mutex_enter(&dev->dev_lock)
#define	DRM_UNLOCK()	mutex_exit(&dev->dev_lock)
#define	DRM_LOCK_OWNED()	ASSERT(mutex_owned(&dev->dev_lock))
#define	spin_lock_irqsave(l, flag)		mutex_enter(l)
#define	spin_unlock_irqrestore(u, flag) mutex_exit(u)
#define	spin_lock(l)	mutex_enter(l)
#define	spin_unlock(u)	mutex_exit(u)


#define	DRM_UDELAY(sec)  delay(drv_usectohz((sec) * 1000))
#define	DRM_MEMORYBARRIER()

typedef	struct drm_file		drm_file_t;
typedef struct drm_device	drm_device_t;
typedef struct drm_driver_info drm_driver_t;

#define	DRM_DEVICE	drm_device_t *dev = dev1
#define	DRM_IOCTL_ARGS	\
	drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode

#define	DRM_COPYFROM_WITH_RETURN(dest, src, size)	\
	if (ddi_copyin((src), (dest), (size), 0)) {	\
		DRM_ERROR("%s: copy from user failed", __func__);	\
		return (EFAULT);	\
	}

#define	DRM_COPYTO_WITH_RETURN(dest, src, size)	\
	if (ddi_copyout((src), (dest), (size), 0)) {	\
		DRM_ERROR("%s: copy to user failed", __func__);	\
		return (EFAULT);	\
	}
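
/*
 * Illustrative use of the copy macros in an ioctl handler.  The handler
 * and argument structure names below are hypothetical and not part of
 * this header; 'data' is the user-space pointer passed in DRM_IOCTL_ARGS.
 *
 *	static int
 *	foo_example_ioctl(DRM_IOCTL_ARGS)
 *	{
 *		DRM_DEVICE;
 *		drm_foo_args_t args;
 *
 *		DRM_COPYFROM_WITH_RETURN(&args, (void *)data, sizeof (args));
 *		args.value = dev->pci_device;
 *		DRM_COPYTO_WITH_RETURN((void *)data, &args, sizeof (args));
 *		return (0);
 *	}
 */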

#define	DRM_COPY_FROM_USER(dest, src, size) \
	ddi_copyin((src), (dest), (size), 0) /* flag for src */

#define	DRM_COPY_TO_USER(dest, src, size) \
	ddi_copyout((src), (dest), (size), 0) /* flags for dest */

#define	DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)  \
	ddi_copyin((arg2), (arg1), (arg3), 0)

#define	DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)        \
	ddi_copyout((arg2), arg1, arg3, 0)

#define	DRM_READ8(map, offset) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ16(map, offset) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ32(map, offset) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_WRITE8(map, offset, val) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE16(map, offset, val) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE32(map, offset, val) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
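
/*
 * Illustrative MMIO access through a mapped register aperture.  The
 * dev_priv->mmio_map variable and the register offset used here are
 * hypothetical; offsets are byte offsets from the base of the mapping
 * (map->dev_addr).
 *
 *	drm_local_map_t *regs = dev_priv->mmio_map;
 *	uint32_t status;
 *
 *	status = DRM_READ32(regs, 0x2080);
 *	DRM_WRITE32(regs, 0x2080, status);
 */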

typedef struct drm_wait_queue {
	kcondvar_t	cv;
	kmutex_t	lock;
} wait_queue_head_t;

#define	DRM_INIT_WAITQUEUE(q, pri)	\
{ \
	mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
	cv_init(&(q)->cv, NULL, CV_DRIVER, NULL);	\
}

#define	DRM_FINI_WAITQUEUE(q)	\
{ \
	mutex_destroy(&(q)->lock);	\
	cv_destroy(&(q)->cv);	\
}

#define	DRM_WAKEUP(q)	\
{ \
	mutex_enter(&(q)->lock); \
	cv_broadcast(&(q)->cv);	\
	mutex_exit(&(q)->lock);	\
}

#define	jiffies	ddi_get_lbolt()

#define	DRM_WAIT_ON(ret, q, timeout, condition)  			\
	mutex_enter(&(q)->lock);					\
	while (!(condition)) {						\
		ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
		    TR_CLOCK_TICK);					\
		if (ret == -1) {					\
			ret = EBUSY;					\
			break;						\
		} else if (ret == 0) {					\
			ret = EINTR;  					\
			break; 						\
		} else { 						\
			ret = 0; 					\
		} 							\
	} 								\
	mutex_exit(&(q)->lock);
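
/*
 * Typical wait-queue pattern (a sketch only; the dev_priv fields named
 * here are hypothetical).  The interrupt handler records the event and
 * calls DRM_WAKEUP(), while the waiting thread blocks in DRM_WAIT_ON()
 * until the condition holds, a signal arrives, or the timeout expires.
 *
 *	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
 *
 *	interrupt handler:
 *		dev_priv->irq_received = 1;
 *		DRM_WAKEUP(&dev_priv->irq_queue);
 *
 *	waiting thread:
 *		DRM_WAIT_ON(ret, &dev_priv->irq_queue, DRM_HZ,
 *		    dev_priv->irq_received == 1);
 */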

#define	DRM_WAIT(ret, q, condition)  \
mutex_enter(&(q)->lock);	\
if (!(condition)) {	\
	ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
	if (ret == -1) {				\
		/* gfx may have hung */	\
		if (!(condition)) 	\
			ret = -2;	\
	} else {	\
		ret = 0;	\
	}	\
} \
mutex_exit(&(q)->lock);


#define	DRM_GETSAREA()  					\
{                                				\
	drm_local_map_t *map;					\
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);			\
	TAILQ_FOREACH(map, &dev->maplist, link) {		\
		if (map->type == _DRM_SHM &&			\
			map->flags & _DRM_CONTAINS_LOCK) {	\
			dev_priv->sarea = map;			\
			break;					\
		}						\
	}							\
}

#define	LOCK_TEST_WITH_RETURN(dev, fpriv)				\
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||		\
	    dev->lock.filp != fpriv) {					\
		DRM_DEBUG("%s called without lock held", __func__);	\
		return (EINVAL);	\
	}
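
/*
 * Sketch of how a DMA-submitting ioctl typically guards itself (the
 * handler name is hypothetical): the hardware lock must be held by the
 * calling file before the request is accepted.
 *
 *	static int
 *	foo_dma_ioctl(DRM_IOCTL_ARGS)
 *	{
 *		DRM_DEVICE;
 *
 *		LOCK_TEST_WITH_RETURN(dev, fpriv);
 *		...
 *	}
 */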

#define	DRM_IRQ_ARGS	caddr_t arg
#define	IRQ_HANDLED		DDI_INTR_CLAIMED
#define	IRQ_NONE		DDI_INTR_UNCLAIMED

enum {
	DRM_IS_NOT_AGP,
	DRM_IS_AGP,
	DRM_MIGHT_BE_AGP
};

/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#ifndef PCIY_AGP
#define	PCIY_AGP		0x02
#endif

#ifndef PCIY_EXPRESS
#define	PCIY_EXPRESS		0x10
#endif

#define	PAGE_ALIGN(addr)	(((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
#define	DRM_SUSER(p)		(crgetsgid(p) == 0 || crgetsuid(p) == 0)

#define	DRM_GEM_OBJIDR_HASHNODE	1024
#define	idr_list_for_each(entry, head) \
	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
		list_for_each(entry, &(head)->next[key])
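
/*
 * Sketch: walking every GEM object handle owned by a file.  The loop
 * variable is a struct idr_list pointer; entry->obj is the object
 * registered under handle entry->handle.
 *
 *	struct idr_list *entry;
 *
 *	idr_list_for_each(entry, &fpriv->object_idr) {
 *		DRM_DEBUG("handle %u -> obj %p",
 *		    entry->handle, (void *)entry->obj);
 *	}
 */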

/*
 * wait for 400 milliseconds
 */
#define	DRM_HZ			drv_usectohz(400000)

typedef unsigned long dma_addr_t;
typedef uint64_t	u64;
typedef uint32_t	u32;
typedef uint16_t	u16;
typedef uint8_t		u8;
typedef uint_t		irqreturn_t;

#define	DRM_SUPPORT	1
#define	DRM_UNSUPPORT	0

#define	__OS_HAS_AGP	1

typedef struct drm_pci_id_list
{
	int vendor;
	int device;
	long driver_private;
	char *name;
} drm_pci_id_list_t;
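
/*
 * Sketch of a device-match table as passed to drm_probe().  The vendor
 * and device IDs below are illustrative only, and the all-zero
 * terminating entry is the assumed convention for these lists.
 *
 *	static drm_pci_id_list_t foo_pciidlist[] = {
 *		{0x8086, 0x2a42, 0, "Example graphics controller"},
 *		{0, 0, 0, NULL}
 *	};
 */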

#define	DRM_AUTH	0x1
#define	DRM_MASTER	0x2
#define	DRM_ROOT_ONLY	0x4
typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
typedef struct drm_ioctl_desc {
	int	(*func)(DRM_IOCTL_ARGS);
	int	flags;
} drm_ioctl_desc_t;
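
/*
 * A driver's private ioctls are described by an array of
 * drm_ioctl_desc_t, hooked up through the driver_ioctls and
 * max_driver_ioctl fields of drm_driver_info below.  Sketch (handler
 * names hypothetical); the flags restrict who may call each ioctl:
 *
 *	static drm_ioctl_desc_t foo_ioctls[] = {
 *		{foo_getparam_ioctl, DRM_AUTH},
 *		{foo_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 *	};
 */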

typedef struct drm_magic_entry {
	drm_magic_t		magic;
	struct drm_file		*priv;
	struct drm_magic_entry	*next;
} drm_magic_entry_t;

typedef struct drm_magic_head {
	struct drm_magic_entry *head;
	struct drm_magic_entry *tail;
} drm_magic_head_t;

typedef struct drm_buf {
	int		idx;		/* Index into master buflist */
	int		total;		/* Buffer size */
	int		order;		/* log-base-2(total) */
	int		used;		/* Amount of buffer in use (for DMA) */
	unsigned long	offset;		/* Byte offset (used internally) */
	void		*address;	/* Address of buffer */
	unsigned long	bus_address;	/* Bus address of buffer */
	struct drm_buf	*next;		/* Kernel-only: used for free list */
	volatile int	pending;	/* On hardware DMA queue */
	drm_file_t		*filp;
				/* Uniq. identifier of holding process */
	int		context;	/* Kernel queue for this buffer */
	enum {
		DRM_LIST_NONE	 = 0,
		DRM_LIST_FREE	 = 1,
		DRM_LIST_WAIT	 = 2,
		DRM_LIST_PEND	 = 3,
		DRM_LIST_PRIO	 = 4,
		DRM_LIST_RECLAIM = 5
	}		list;		/* Which list we're on */

	int		dev_priv_size;	/* Size of buffer private storage */
	void		*dev_private;	/* Per-buffer private storage */
} drm_buf_t;

typedef struct drm_freelist {
	int		  initialized;	/* Freelist in use		*/
	uint32_t	  count;	/* Number of free buffers	*/
	drm_buf_t	  *next;	/* End pointer			*/

	int		  low_mark;	/* Low water mark		*/
	int		  high_mark;	/* High water mark		*/
} drm_freelist_t;

typedef struct drm_buf_entry {
	int		  buf_size;
	int		  buf_count;
	drm_buf_t	  *buflist;
	int		  seg_count;
	int		  page_order;

	uint32_t	  *seglist;
	unsigned long	  *seglist_bus;

	drm_freelist_t	  freelist;
} drm_buf_entry_t;

typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;

/* BEGIN CSTYLED */
typedef struct drm_local_map {
	unsigned long	offset;  /* Physical address (0 for SAREA)	*/
	unsigned long	size;	 /* Physical size (bytes)		*/
	drm_map_type_t	type;	 /* Type of memory mapped		*/
	drm_map_flags_t flags;	 /* Flags				*/
	void		*handle; /* User-space: "Handle" to pass to mmap */
				 /* Kernel-space: kernel-virtual address */
	int		mtrr;	 /* Boolean: MTRR used 			*/
				 /* Private data			*/
	int		rid;	 /* PCI resource ID for bus_space 	*/
	int		kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
	caddr_t		dev_addr;	  /* base device address 	*/
	ddi_acc_handle_t  dev_handle;	  /* The data access handle 	*/
	ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free  */
	TAILQ_ENTRY(drm_local_map) link;
} drm_local_map_t;
/* END CSTYLED */

/*
 * This structure defines the GEM (Graphics Execution Manager) buffer
 * object, which the DRM uses to back its graphics memory objects.
 */
struct drm_gem_object {
	/* Reference count of this object */
	atomic_t refcount;

	/* Handle count of this object. Each handle also holds a reference */
	atomic_t handlecount;

	/* Related drm device */
	struct drm_device *dev;

	int flink;
	/*
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/*
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int name;

	/*
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/*
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	void *driver_private;

	drm_local_map_t *map;
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;
	caddr_t kaddr;
	size_t real_size;	/* real size of memory */
	pfn_t *pfnarray;
};

struct idr_list {
	struct idr_list *next, *prev;
	struct drm_gem_object *obj;
	uint32_t	handle;
	caddr_t	contain_ptr;
};

struct drm_file {
	TAILQ_ENTRY(drm_file) link;
	int		  authenticated;
	int		  master;
	int		  minor;
	pid_t		  pid;
	uid_t		  uid;
	int		  refs;
	drm_magic_t	  magic;
	unsigned long	  ioctl_count;
	void		 *driver_priv;
	/* Mapping of mm object handles to object pointers. */
	struct idr_list object_idr;
	/* Lock for synchronization of access to object_idr. */
	kmutex_t table_lock;

	dev_t dev;
	cred_t *credp;
};

typedef struct drm_lock_data {
	drm_hw_lock_t	*hw_lock;	/* Hardware lock		*/
	drm_file_t	*filp;
	/* Uniq. identifier of holding process */
	kcondvar_t	lock_cv;	/* lock queue - SOLARIS Specific */
	kmutex_t	lock_mutex;	/* lock - SOLARIS Specific */
	unsigned long	lock_time;	/* Time of last lock in clock ticks */
} drm_lock_data_t;

/*
 * This structure, in drm_device_t, is always initialized while the device
 * is open.  dev->dma_lock protects the incrementing of dev->buf_use, which
 * when set marks that no further bufs may be allocated until device teardown
 * occurs (when the last open of the device has closed).  The high/low
 * watermarks of bufs are only touched by the X Server, and thus not
 * concurrently accessed, so no locking is needed.
 */
typedef struct drm_device_dma {
	drm_buf_entry_t	bufs[DRM_MAX_ORDER+1];
	int		buf_count;
	drm_buf_t	**buflist;	/* Vector of pointers into bufs	   */
	int		seg_count;
	int		page_count;
	unsigned long	*pagelist;
	unsigned long	byte_count;
	enum {
		_DRM_DMA_USE_AGP = 0x01,
		_DRM_DMA_USE_SG  = 0x02
	} flags;
} drm_device_dma_t;
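
/*
 * Sketch of the buf_use protocol described above (illustrative only):
 * paths that hand DMA buffers out to callers bump dev->buf_use under
 * dev->dma_lock, which marks that no further buffers may be allocated
 * until the device is torn down.
 *
 *	mutex_enter(&dev->dma_lock);
 *	++dev->buf_use;
 *	mutex_exit(&dev->dma_lock);
 */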

typedef struct drm_agp_mem {
	void		*handle;
	unsigned long	bound; /* address */
	int		pages;
	caddr_t		phys_addr;
	struct drm_agp_mem *prev;
	struct drm_agp_mem *next;
} drm_agp_mem_t;

typedef struct drm_agp_head {
	agp_info_t	agp_info;
	const char	*chipset;
	drm_agp_mem_t	*memory;
	unsigned long	mode;
	int		enabled;
	int		acquired;
	unsigned long	base;
	int		mtrr;
	int		cant_use_aperture;
	unsigned long	page_mask;
	ldi_ident_t	agpgart_li;
	ldi_handle_t	agpgart_lh;
} drm_agp_head_t;


typedef struct drm_dma_handle {
	ddi_dma_handle_t	dma_hdl;
	ddi_acc_handle_t	acc_hdl;
	ddi_dma_cookie_t	cookie;
	uint_t		cookie_num;
	uintptr_t		vaddr;   /* virtual addr */
	uintptr_t		paddr;   /* physical addr */
	size_t		real_sz; /* real size of memory */
} drm_dma_handle_t;

typedef struct drm_sg_mem {
	unsigned long	handle;
	void		*virtual;
	int		pages;
	dma_addr_t  	*busaddr;
	ddi_umem_cookie_t	*umem_cookie;
	drm_dma_handle_t	*dmah_sg;
	drm_dma_handle_t	*dmah_gart; /* Handle to PCI memory */
} drm_sg_mem_t;

/*
 * Generic memory manager structs
 */

struct drm_mm_node {
	struct list_head fl_entry;
	struct list_head ml_entry;
	int free;
	unsigned long start;
	unsigned long size;
	struct drm_mm *mm;
	void *private;
};

struct drm_mm {
	struct list_head fl_entry;
	struct list_head ml_entry;
};

typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;

typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
typedef struct drm_vbl_sig {
	TAILQ_ENTRY(drm_vbl_sig) link;
	unsigned int	sequence;
	int		signo;
	int		pid;
} drm_vbl_sig_t;


/* used for clone device */
typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
typedef struct drm_cminor {
	TAILQ_ENTRY(drm_cminor) link;
	drm_file_t		*fpriv;
	int			minor;
} drm_cminor_t;

/* location of GART table */
#define	DRM_ATI_GART_MAIN	1
#define	DRM_ATI_GART_FB		2

typedef struct ati_pcigart_info {
	int gart_table_location;
	int is_pcie;
	void *addr;
	dma_addr_t bus_addr;
	drm_local_map_t mapping;
} drm_ati_pcigart_info;

/* DRM device structure */
struct drm_device;
struct drm_driver_info {
	int (*load)(struct drm_device *, unsigned long);
	int (*firstopen)(struct drm_device *);
	int (*open)(struct drm_device *, drm_file_t *);
	void (*preclose)(struct drm_device *, drm_file_t *);
	void (*postclose)(struct drm_device *, drm_file_t *);
	void (*lastclose)(struct drm_device *);
	int (*unload)(struct drm_device *);
	void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	int (*open_helper)(struct drm_device *, drm_file_t *);
	void (*free_filp_priv)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, void *);
	int (*dma_ioctl)(DRM_IOCTL_ARGS);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *,
			int, drm_lock_flags_t);
	int (*dma_flush_unblock)(struct drm_device *, int,
					drm_lock_flags_t);
	int (*context_ctor)(struct drm_device *, int);
	int (*context_dtor)(struct drm_device *, int);
	int (*kernel_context_switch)(struct drm_device *, int, int);
	int (*kernel_context_switch_unlock)(struct drm_device *);
	int (*device_is_agp) (struct drm_device *);
	int (*irq_preinstall)(struct drm_device *);
	void (*irq_postinstall)(struct drm_device *);
	void (*irq_uninstall)(struct drm_device *dev);
	uint_t (*irq_handler)(DRM_IRQ_ARGS);
	int (*vblank_wait)(struct drm_device *, unsigned int *);
	int (*vblank_wait2)(struct drm_device *, unsigned int *);
	/* added for intel minimized vblank */
	u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
	int (*enable_vblank)(struct drm_device *dev, int crtc);
	void (*disable_vblank)(struct drm_device *dev, int crtc);

	/*
	 * Driver-specific constructor for drm_gem_objects, to set up
	 * obj->driver_private.
	 *
	 * Returns 0 on success.
	 */
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);


	drm_ioctl_desc_t *driver_ioctls;
	int	max_driver_ioctl;

	int	buf_priv_size;
	int	driver_major;
	int	driver_minor;
	int	driver_patchlevel;
	const char *driver_name;	/* Simple driver name		   */
	const char *driver_desc;	/* Longer driver name		   */
	const char *driver_date;	/* Date of last major changes.	   */

	unsigned use_agp :1;
	unsigned require_agp :1;
	unsigned use_sg :1;
	unsigned use_dma :1;
	unsigned use_pci_dma :1;
	unsigned use_dma_queue :1;
	unsigned use_irq :1;
	unsigned use_vbl_irq :1;
	unsigned use_vbl_irq2 :1;
	unsigned use_mtrr :1;
	unsigned use_gem;
};

/*
 * Hardware-specific code needs to initialize mutexes that can be
 * used in interrupt context, so it needs to know the interrupt
 * priority.  The interrupt cookie in the drm_device structure is
 * the intr_block field.
 */
#define	DRM_INTR_PRI(dev) \
	DDI_INTR_PRI((dev)->intr_block)
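
/*
 * Sketch: a driver creating a mutex and condition variable that its
 * interrupt handler will use, at the priority derived from the device's
 * interrupt block cookie (the dev_priv field names are hypothetical).
 *
 *	mutex_init(&dev_priv->user_irq_lock, NULL, MUTEX_DRIVER,
 *	    DRM_INTR_PRI(dev));
 *	cv_init(&dev_priv->user_irq_cv, NULL, CV_DRIVER, NULL);
 */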

struct drm_device {
	drm_driver_t	*driver;
	drm_cminor_list_t	minordevs;
	dev_info_t *dip;
	void	*drm_handle;
	int drm_supported;
	const char *desc; /* current driver description */
	kmutex_t *irq_mutex;
	kcondvar_t *irq_cv;

	ddi_iblock_cookie_t intr_block;
	uint32_t	pci_device;	/* PCI device id */
	uint32_t	pci_vendor;
	char		*unique;	/* Unique identifier: e.g., busid  */
	int		unique_len;	/* Length of unique field	   */
	int		if_version;	/* Highest interface version set */
	int		flags;	/* Flags to open(2)		   */

	/* Locks */
	kmutex_t	vbl_lock;	/* protects vblank operations */
	kmutex_t	dma_lock;	/* protects dev->dma */
	kmutex_t	irq_lock;	/* protects irq condition checks */
	kmutex_t	dev_lock;	/* protects everything else */
	drm_lock_data_t   lock;		/* Information on hardware lock    */
	kmutex_t	struct_mutex;	/* For others	*/

	/* Usage Counters */
	int		  open_count;	/* Outstanding files open	   */
	int		  buf_use;	/* Buffers in use -- cannot alloc  */

	/* Performance counters */
	unsigned long	  counters;
	drm_stat_type_t	  types[15];
	uint32_t	  counts[15];

	/* Authentication */
	drm_file_list_t   files;
	drm_magic_head_t  magiclist[DRM_HASH_SIZE];

	/* Linked list of mappable regions. Protected by dev_lock */
	drm_map_list_t	  maplist;

	drm_local_map_t	  **context_sareas;
	int		  max_context;

	/* DMA queues (contexts) */
	drm_device_dma_t  *dma;		/* Optional pointer for DMA support */

	/* Context support */
	int		  irq;		/* Interrupt used by board	   */
	int		  irq_enabled;	/* True if the irq handler is enabled */
	int		  pci_domain;
	int		  pci_bus;
	int		  pci_slot;
	int		  pci_func;
	atomic_t	  context_flag;	/* Context swapping flag	   */
	int		  last_context;	/* Last current context		   */

	/* Only used for Radeon */
	atomic_t	vbl_received;
	atomic_t	vbl_received2;

	drm_vbl_sig_list_t vbl_sig_list;
	drm_vbl_sig_list_t vbl_sig_list2;
	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;

	wait_queue_head_t	vbl_queue;	/* vbl wait channel */
	/* vbl wait channel array */
	wait_queue_head_t	*vbl_queues;

	/* number of VBLANK interrupts */
	/* (driver must alloc the right number of counters) */
	atomic_t	  *_vblank_count;
	/* signal list to send on VBLANK */
	struct drm_vbl_sig_list *vbl_sigs;

	/* number of signals pending on all crtcs */
	atomic_t	  vbl_signal_pending;
	/* number of users of vblank interrupts per crtc */
	atomic_t	  *vblank_refcount;
	/* protected by dev->vbl_lock, used for wraparound handling */
	u32		  *last_vblank;
	/* so we don't call enable more than once per disable */
	atomic_t	  *vblank_enabled;
	/* Display driver is setting mode */
	int		*vblank_inmodeset;
	/* Don't wait while crtc is likely disabled */
	int		*vblank_suspend;
	/* size of vblank counter register */
	u32		max_vblank_count;
	int		num_crtcs;
	kmutex_t	tasklet_lock;
	void (*locked_tasklet_func)(struct drm_device *dev);

	pid_t		  buf_pgid;
	drm_agp_head_t    *agp;
	drm_sg_mem_t	  *sg;  /* Scatter gather memory */
	uint32_t	  *ctx_bitmap;
	void		  *dev_private;
	unsigned int	  agp_buffer_token;
	drm_local_map_t   *agp_buffer_map;

	kstat_t		  *asoft_ksp; /* kstat support */

	/* \name Drawable information */
	kmutex_t	drw_lock;
	unsigned int drw_bitfield_length;
	u32 *drw_bitfield;
	unsigned int drw_info_length;
	drm_drawable_info_t **drw_info;

	/* \name GEM information */
	/* @{ */
	kmutex_t object_name_lock;
	struct idr_list	object_name_idr;
	atomic_t object_count;
	atomic_t object_memory;
	atomic_t pin_count;
	atomic_t pin_memory;
	atomic_t gtt_count;
	atomic_t gtt_memory;
	uint32_t gtt_total;
	uint32_t invalidate_domains;	/* domains pending invalidation */
	uint32_t flush_domains;	/* domains pending flush */
	/* @} */

	/*
	 * Saving S3 context
	 */
	void		  *s3_private;
};

/* Memory management support (drm_memory.c) */
void	drm_mem_init(void);
void	drm_mem_uninit(void);
void	*drm_alloc(size_t, int);
void	*drm_calloc(size_t, size_t, int);
void	*drm_realloc(void *, size_t, size_t, int);
void	drm_free(void *, size_t, int);
int	drm_ioremap(drm_device_t *, drm_local_map_t *);
void	drm_ioremapfree(drm_local_map_t *);

void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);

void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);

struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);

int	drm_context_switch(drm_device_t *, int, int);
int	drm_context_switch_complete(drm_device_t *, int);
int	drm_ctxbitmap_init(drm_device_t *);
void	drm_ctxbitmap_cleanup(drm_device_t *);
void	drm_ctxbitmap_free(drm_device_t *, int);
int	drm_ctxbitmap_next(drm_device_t *);

/* Locking IOCTL support (drm_lock.c) */
int	drm_lock_take(drm_lock_data_t *, unsigned int);
int	drm_lock_transfer(drm_device_t *,
			drm_lock_data_t *, unsigned int);
int	drm_lock_free(drm_device_t *,
		    volatile unsigned int *, unsigned int);

/* Buffer management support (drm_bufs.c) */
unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
int	drm_initmap(drm_device_t *, unsigned long, unsigned long,
    unsigned int, int, int);
void	drm_rmmap(drm_device_t *, drm_local_map_t *);
int	drm_addmap(drm_device_t *, unsigned long, unsigned long,
    drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
int	drm_order(unsigned long);

/* DMA support (drm_dma.c) */
int	drm_dma_setup(drm_device_t *);
void	drm_dma_takedown(drm_device_t *);
void	drm_free_buffer(drm_device_t *, drm_buf_t *);
void	drm_reclaim_buffers(drm_device_t *, drm_file_t *);
#define	drm_core_reclaim_buffers	drm_reclaim_buffers

/* IRQ support (drm_irq.c) */
int	drm_irq_install(drm_device_t *);
int	drm_irq_uninstall(drm_device_t *);
uint_t	drm_irq_handler(DRM_IRQ_ARGS);
void	drm_driver_irq_preinstall(drm_device_t *);
void	drm_driver_irq_postinstall(drm_device_t *);
void	drm_driver_irq_uninstall(drm_device_t *);
int	drm_vblank_wait(drm_device_t *, unsigned int *);
void	drm_vbl_send_signals(drm_device_t *);
void	drm_handle_vblank(struct drm_device *dev, int crtc);
u32	drm_vblank_count(struct drm_device *dev, int crtc);
int	drm_vblank_get(struct drm_device *dev, int crtc);
void	drm_vblank_put(struct drm_device *dev, int crtc);
int	drm_vblank_init(struct drm_device *dev, int num_crtcs);
void	drm_vblank_cleanup(struct drm_device *dev);
int	drm_modeset_ctl(DRM_IOCTL_ARGS);

/* AGP/GART support (drm_agpsupport.c) */
int	drm_device_is_agp(drm_device_t *);
int	drm_device_is_pcie(drm_device_t *);
drm_agp_head_t *drm_agp_init(drm_device_t *);
void	drm_agp_fini(drm_device_t *);
int	drm_agp_do_release(drm_device_t *);
void	*drm_agp_allocate_memory(size_t pages,
	    uint32_t type, drm_device_t *dev);
int	drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
int	drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
int	drm_agp_unbind_memory(unsigned long, drm_device_t *);
int	drm_agp_bind_pages(drm_device_t *dev,
		    pfn_t *pages,
		    unsigned long num_pages,
		    uint32_t gtt_offset);
int	drm_agp_unbind_pages(drm_device_t *dev,
		    unsigned long num_pages,
		    uint32_t gtt_offset,
		    uint32_t type);
void drm_agp_chipset_flush(struct drm_device *dev);
void drm_agp_rebind(struct drm_device *dev);

/* kstat support (drm_kstats.c) */
int	drm_init_kstats(drm_device_t *);
void	drm_fini_kstats(drm_device_t *);

/* Scatter Gather Support (drm_scatter.c) */
void	drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);

/* ATI PCIGART support (ati_pcigart.c) */
int	drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
int	drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);

/* Locking IOCTL support (drm_drv.c) */
int	drm_lock(DRM_IOCTL_ARGS);
int	drm_unlock(DRM_IOCTL_ARGS);
int	drm_version(DRM_IOCTL_ARGS);
int	drm_setversion(DRM_IOCTL_ARGS);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);

/* Misc. IOCTL support (drm_ioctl.c) */
int	drm_irq_by_busid(DRM_IOCTL_ARGS);
int	drm_getunique(DRM_IOCTL_ARGS);
int	drm_setunique(DRM_IOCTL_ARGS);
int	drm_getmap(DRM_IOCTL_ARGS);
int	drm_getclient(DRM_IOCTL_ARGS);
int	drm_getstats(DRM_IOCTL_ARGS);
int	drm_noop(DRM_IOCTL_ARGS);

/* Context IOCTL support (drm_context.c) */
int	drm_resctx(DRM_IOCTL_ARGS);
int	drm_addctx(DRM_IOCTL_ARGS);
int	drm_modctx(DRM_IOCTL_ARGS);
int	drm_getctx(DRM_IOCTL_ARGS);
int	drm_switchctx(DRM_IOCTL_ARGS);
int	drm_newctx(DRM_IOCTL_ARGS);
int	drm_rmctx(DRM_IOCTL_ARGS);
int	drm_setsareactx(DRM_IOCTL_ARGS);
int	drm_getsareactx(DRM_IOCTL_ARGS);

/* Drawable IOCTL support (drm_drawable.c) */
int	drm_adddraw(DRM_IOCTL_ARGS);
int	drm_rmdraw(DRM_IOCTL_ARGS);
int	drm_update_draw(DRM_IOCTL_ARGS);

/* Authentication IOCTL support (drm_auth.c) */
int	drm_getmagic(DRM_IOCTL_ARGS);
int	drm_authmagic(DRM_IOCTL_ARGS);
int	drm_remove_magic(drm_device_t *, drm_magic_t);
drm_file_t	*drm_find_file(drm_device_t *, drm_magic_t);
/* Buffer management support (drm_bufs.c) */
int	drm_addmap_ioctl(DRM_IOCTL_ARGS);
int	drm_rmmap_ioctl(DRM_IOCTL_ARGS);
int	drm_addbufs_ioctl(DRM_IOCTL_ARGS);
int	drm_infobufs(DRM_IOCTL_ARGS);
int	drm_markbufs(DRM_IOCTL_ARGS);
int	drm_freebufs(DRM_IOCTL_ARGS);
int	drm_mapbufs(DRM_IOCTL_ARGS);

/* DMA support (drm_dma.c) */
int	drm_dma(DRM_IOCTL_ARGS);

/* IRQ support (drm_irq.c) */
int	drm_control(DRM_IOCTL_ARGS);
int	drm_wait_vblank(DRM_IOCTL_ARGS);

/* AGP/GART support (drm_agpsupport.c) */
int	drm_agp_acquire(DRM_IOCTL_ARGS);
int	drm_agp_release(DRM_IOCTL_ARGS);
int	drm_agp_enable(DRM_IOCTL_ARGS);
int	drm_agp_info(DRM_IOCTL_ARGS);
int	drm_agp_alloc(DRM_IOCTL_ARGS);
int	drm_agp_free(DRM_IOCTL_ARGS);
int	drm_agp_unbind(DRM_IOCTL_ARGS);
int	drm_agp_bind(DRM_IOCTL_ARGS);

/* Scatter Gather Support (drm_scatter.c) */
int	drm_sg_alloc(DRM_IOCTL_ARGS);
int	drm_sg_free(DRM_IOCTL_ARGS);

/*	drm_mm.c	*/
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
				    unsigned long size, unsigned alignment);
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				    unsigned long size,
				    unsigned alignment, int best_match);

extern void drm_mm_clean_ml(const struct drm_mm *mm);
extern int drm_debug_flag;

/* Functions backing the DRM_DEBUG, DRM_ERROR and DRM_INFO macros */
extern void drm_debug(const char *fmt, ...);
extern void drm_error(const char *fmt, ...);
extern void drm_info(const char *fmt, ...);

#ifdef DEBUG
#define	DRM_DEBUG		if (drm_debug_flag >= 2) drm_debug
#define	DRM_INFO		if (drm_debug_flag >= 1) drm_info
#else
#define	DRM_DEBUG(...)
#define	DRM_INFO(...)
#endif

#define	DRM_ERROR		drm_error
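
/*
 * These behave like printf-style tracing: DRM_DEBUG and DRM_INFO are
 * compiled out of non-DEBUG builds and otherwise gated on drm_debug_flag,
 * while DRM_ERROR is always available.  Example calls (the values shown
 * are hypothetical):
 *
 *	DRM_DEBUG("drm_open: minor %d", minor);
 *	DRM_ERROR("%s: alloc of %lu bytes failed", __func__, (ulong_t)size);
 */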


#define	MAX_INSTNUMS 16

extern int drm_dev_to_instance(dev_t);
extern int drm_dev_to_minor(dev_t);
extern void *drm_supp_register(dev_info_t *, drm_device_t *);
extern int drm_supp_unregister(void *);

extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
extern int drm_close(drm_device_t *, int, int, int, cred_t *);
extern int drm_attach(drm_device_t *);
extern int drm_detach(drm_device_t *);
extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);

extern int drm_pci_init(drm_device_t *);
extern void drm_pci_end(drm_device_t *);
extern int pci_get_info(drm_device_t *, int *, int *, int *);
extern int pci_get_irq(drm_device_t *);
extern int pci_get_vendor(drm_device_t *);
extern int pci_get_device(drm_device_t *);

extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
							drm_drawable_t);
/* File Operations helpers (drm_fops.c) */
extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
    cred_t *);

/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_object_free(struct drm_gem_object *obj);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
					    size_t size);
void drm_gem_object_handle_free(struct drm_gem_object *obj);

void drm_gem_object_reference(struct drm_gem_object *obj);
void drm_gem_object_unreference(struct drm_gem_object *obj);

int drm_gem_handle_create(struct drm_file *file_priv,
			    struct drm_gem_object *obj,
			    int *handlep);
void drm_gem_object_handle_reference(struct drm_gem_object *obj);

void drm_gem_object_handle_unreference(struct drm_gem_object *obj);

struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
					    int handle);
int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
void drm_gem_open(struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
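
/*
 * Sketch of the common GEM flow (error handling omitted; 'size' and the
 * surrounding context are hypothetical).  A driver allocates an object,
 * exposes it to userland as a per-file handle, drops its own creation
 * reference (the handle keeps one), and later resolves the handle back
 * to the object.
 *
 *	struct drm_gem_object *obj;
 *	int handle;
 *
 *	obj = drm_gem_object_alloc(dev, size);
 *	(void) drm_gem_handle_create(fpriv, obj, &handle);
 *	drm_gem_object_unreference(obj);
 *
 *	obj = drm_gem_object_lookup(fpriv, handle);
 */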


#endif	/* _DRMP_H */