xref: /titanic_50/usr/src/uts/intel/io/drm/i915_dma.c (revision 4e942d8cd27c7f8bb80549d7c2564445f19ba4a3)
1 /*
2  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /* BEGIN CSTYLED */
7 
8 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
9  */
10 /**************************************************************************
11  *
12  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
13  * All Rights Reserved.
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a
16  * copy of this software and associated documentation files (the
17  * "Software"), to deal in the Software without restriction, including
18  * without limitation the rights to use, copy, modify, merge, publish,
19  * distribute, sub license, and/or sell copies of the Software, and to
20  * permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the
24  * next paragraph) shall be included in all copies or substantial portions
25  * of the Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
28  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
30  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
31  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
32  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
33  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
34  *
35  **************************************************************************/
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 #include "drmP.h"
40 #include "drm.h"
41 #include "i915_drm.h"
42 #include "i915_drv.h"
43 
44 /* Really want an OS-independent resettable timer.  Would like to have
45  * this loop run for (eg) 3 sec, but have the timer reset every time
46  * the head pointer changes, so that EBUSY only happens if the ring
47  * actually stalls for (eg) 3 seconds.
48  */
49 /*ARGSUSED*/
50 int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
51 {
52 	drm_i915_private_t *dev_priv = dev->dev_private;
53 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
54 	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
55 	int i;
56 
57 	for (i = 0; i < 10000; i++) {
58 		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
59 		ring->space = ring->head - (ring->tail + 8);
60 		if (ring->space < 0)
61 			ring->space += ring->Size;
62 		if (ring->space >= n)
63 			return 0;
64 
65 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
66 
67 		if (ring->head != last_head)
68 			i = 0;
69 
70 		last_head = ring->head;
71 	}
72 
73 	return DRM_ERR(EBUSY);
74 }
75 
76 void i915_kernel_lost_context(drm_device_t * dev)
77 {
78 	drm_i915_private_t *dev_priv = dev->dev_private;
79 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
80 
81 	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
82 	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
83 	ring->space = ring->head - (ring->tail + 8);
84 	if (ring->space < 0)
85 		ring->space += ring->Size;
86 
87 	if (ring->head == ring->tail)
88 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
89 }
90 
/*
 * Tear down DMA state: disable interrupts, unmap the ring buffer,
 * free the hardware status page, and release dev_private.  Safe to
 * call on a partially initialized device.  Always returns 0.
 */
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		(void) drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		/* NOTE: each arm of the #if/#else below opens an if-block
		 * that is closed by the single shared brace after the
		 * I915_WRITE -- do not "balance" these braces per-arm.
		 */
#if defined(__SOLARIS__) || defined(sun)
		if (dev_priv->hw_status_page) {
			drm_pci_free(dev);
#else
		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
#endif
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}
127 
/*
 * One-time DMA setup driven by the I915_INIT_DMA ioctl: locate the
 * SAREA and MMIO map, ioremap the ring buffer, and allocate and
 * program the hardware status page.  On every failure path
 * dev->dev_private is assigned first so that i915_dma_cleanup() can
 * undo the partial setup.  Returns 0 on success.
 */
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	(void) memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	/* The driver-private part of the SAREA lives at a caller
	 * supplied offset within the shared area.
	 */
	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* NOTE(review): tail_mask assumes ring_size is a power of two --
	 * confirm against callers of I915_INIT_DMA.
	 */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset.off = (u_offset_t)init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;

	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
#if defined(__SOLARIS__) || defined(sun)
	dev_priv->hw_status_page =
	    drm_pci_alloc(dev, DRM_PAGE_SIZE, &dev_priv->dma_status_page);
#else
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
	    0xffffffff);
#endif

#if defined(__SOLARIS__) || defined(sun)
	if (!dev_priv->hw_status_page) {
#else
	if (!dev_priv->status_page_dmah) {
#endif
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}

#if !defined(__SOLARIS__) && !defined(sun)
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
#endif
	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Point the chip at the new status page (register 0x02080; the
	 * cleanup path rewrites the same register).
	 */
	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}
224 
225 static int i915_dma_resume(drm_device_t * dev)
226 {
227 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
228 
229 	DRM_DEBUG("%s", "i915_dma_resume");
230 
231 	if (!dev_priv->sarea) {
232 		DRM_ERROR("can not find sarea!\n");
233 		return DRM_ERR(EINVAL);
234 	}
235 
236 	if (!dev_priv->mmio_map) {
237 		DRM_ERROR("can not find mmio map!\n");
238 		return DRM_ERR(EINVAL);
239 	}
240 
241 	if (dev_priv->ring.map.handle == NULL) {
242 		DRM_ERROR("can not ioremap virtual address for"
243 			  " ring buffer\n");
244 		return DRM_ERR(ENOMEM);
245 	}
246 
247 	/* Program Hardware Status Page */
248 	if (!dev_priv->hw_status_page) {
249 		DRM_ERROR("Can not find hardware status page\n");
250 		return DRM_ERR(EINVAL);
251 	}
252 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
253 
254 	I915_WRITE(0x02080, dev_priv->dma_status_page);
255 	DRM_DEBUG("Enabled hardware status page\n");
256 
257 	return 0;
258 }
259 
260 /*ARGSUSED*/
261 static int i915_dma_init(DRM_IOCTL_ARGS)
262 {
263 	DRM_DEVICE;
264 	drm_i915_private_t *dev_priv;
265 	drm_i915_init_t init;
266 	int retcode = 0;
267 
268 	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
269 				 sizeof(init));
270 
271 	switch (init.func) {
272 	case I915_INIT_DMA:
273 		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
274 				     DRM_MEM_DRIVER);
275 		if (dev_priv == NULL)
276 			return DRM_ERR(ENOMEM);
277 		retcode = i915_initialize(dev, dev_priv, &init);
278 		break;
279 	case I915_CLEANUP_DMA:
280 		retcode = i915_dma_cleanup(dev);
281 		break;
282 	case I915_RESUME_DMA:
283 		retcode = i915_dma_resume(dev);
284 		break;
285 	default:
286 		retcode = -EINVAL;
287 		break;
288 	}
289 
290 	return retcode;
291 }
292 
293 /* Implement basically the same security restrictions as hardware does
294  * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
295  *
296  * Most of the calculations below involve calculating the size of a
297  * particular instruction.  It's important to get the size right as
298  * that tells us where the next instruction to check is.  Any illegal
299  * instruction detected will be given a size of zero, which is a
300  * signal to abort the rest of the buffer.
301  */
/*
 * Return the length in dwords of the command whose header is 'cmd',
 * or zero if the command is not permitted in a non-secure batch.
 * A zero result tells the caller to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;

	switch (client) {
	case 0x0: {
		int op = (cmd >> 23) & 0x3f;

		if (op == 0x0)
			return 1;	/* MI_NOOP */
		if (op == 0x4)
			return 1;	/* MI_FLUSH */
		return 0;		/* disallow everything else */
	}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3: {
		int op3d = (cmd >> 24) & 0x1f;

		if (op3d <= 0x18)
			return 1;

		switch (op3d) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	}
	default:
		return 0;
	}

#ifndef __SUNPRO_C
	/* NOTREACHED -- keeps non-Sun compilers quiet */
	return 0;
#endif
}
363 
/*
 * Thin wrapper around do_validate_cmd(); kept as a separate hook so
 * validation can be instrumented for debugging.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
372 
/*
 * Copy a user command buffer into the ring, validating every command
 * header with validate_cmd() so that only instructions permitted in
 * a non-secure batch are emitted.  'dwords' is the buffer length in
 * 32-bit words; 'mode' is unused here.  Returns 0 on success or
 * DRM_ERR(EINVAL) on a copy fault, an illegal command, or a command
 * whose operands would overrun the buffer.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords, int mode)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		/* sz is the total command length in dwords (header
		 * included); zero means the command is disallowed.
		 */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		BEGIN_LP_RING(sz);
		OUT_RING(cmd);

		/* Emit the remaining sz-1 operand dwords.  The comma
		 * expression advances the buffer index while counting
		 * down the command length.
		 */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
		ADVANCE_LP_RING();
	}

	return 0;
}
405 
406 static int i915_emit_box(drm_device_t * dev,
407 			 drm_clip_rect_t __user * boxes,
408 			 int i, int DR1, int DR4, int mode)
409 {
410 	drm_i915_private_t *dev_priv = dev->dev_private;
411 	drm_clip_rect_t box;
412 	RING_LOCALS;
413 
414 	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
415 		return EFAULT;
416 	}
417 
418 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
419 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
420 			  box.x1, box.y1, box.x2, box.y2);
421 		return DRM_ERR(EINVAL);
422 	}
423 
424 	BEGIN_LP_RING(6);
425 	OUT_RING(GFX_OP_DRAWRECT_INFO);
426 	OUT_RING(DR1);
427 	OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
428 	OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
429 	OUT_RING(DR4);
430 	OUT_RING(0);
431 	ADVANCE_LP_RING();
432 
433 	return 0;
434 }
435 
436 static int i915_dispatch_cmdbuffer(drm_device_t * dev,
437 				   drm_i915_cmdbuffer_t * cmd, int mode)
438 {
439 	int nbox = cmd->num_cliprects;
440 	int i = 0, count, ret;
441 
442 	if (cmd->sz & 0x3) {
443 		DRM_ERROR("alignment");
444 		return DRM_ERR(EINVAL);
445 	}
446 
447 	i915_kernel_lost_context(dev);
448 
449 	count = nbox ? nbox : 1;
450 
451 	for (i = 0; i < count; i++) {
452 		if (i < nbox) {
453 			ret = i915_emit_box(dev, cmd->cliprects, i,
454 					    cmd->DR1, cmd->DR4, mode);
455 			if (ret)
456 				return ret;
457 		}
458 
459 		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4, mode);
460 		if (ret)
461 			return ret;
462 	}
463 
464 	return 0;
465 }
466 
/*
 * Execute a user batch buffer once per cliprect (or once if there
 * are none).  Depending on use_mi_batchbuffer_start the batch is
 * either chained to with MI_BATCH_BUFFER_START or bracketed with an
 * explicit MI_BATCH_BUFFER start/end pair; both run the buffer as
 * MI_BATCH_NON_SECURE.  Finishes by writing an incremented sequence
 * counter to the hardware status page as a completion breadcrumb.
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch, int mode)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Batch start address and length must be 8-byte aligned. */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4, mode);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			/* Explicit start/end form: second address is the
			 * last dword of the batch.
			 */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Breadcrumb: store the new counter at status page index 20. */
	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
519 
/*
 * Queue an asynchronous page flip between the front and back
 * buffers, toggling current_page to track which buffer will be
 * scanned out.  The ring is made to wait for the flip to occur, then
 * a breadcrumb with the incremented counter is written to the status
 * page and the new page is published in the SAREA.
 */
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush the map cache before switching the display base. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Point the display at the other buffer and toggle the flag. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall the ring until the plane A flip has happened. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Breadcrumb: store the new counter at status page index 20. */
	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
567 
568 static int i915_quiescent(drm_device_t * dev)
569 {
570 	drm_i915_private_t *dev_priv = dev->dev_private;
571 
572 	i915_kernel_lost_context(dev);
573 	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
574 }
575 
/*
 * DRM_I915_FLUSH ioctl: wait for the ring buffer to drain completely.
 * Caller must hold the DRM lock.
 */
/*ARGSUSED*/
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}
585 
/*
 * DRM_I915_BATCHBUFFER ioctl: execute a user batch buffer.  32-bit
 * callers on a 64-bit kernel are handled via the ILP32 data-model
 * conversion.  On return the last completed sequence number (status
 * page dword 5) is mirrored into the SAREA for userland polling.
 */
/*ARGSUSED*/
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	/* Copy in the request, widening the 32-bit layout if needed. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_batchbuffer32_t batchbuffer32_t;

		DRM_COPY_FROM_USER_IOCTL(batchbuffer32_t,
			(drm_i915_batchbuffer32_t __user *) data,
			sizeof (drm_i915_batchbuffer32_t));

		batch.start = batchbuffer32_t.start;
		batch.used = batchbuffer32_t.used;
		batch.DR1 = batchbuffer32_t.DR1;
		batch.DR4 = batchbuffer32_t.DR4;
		batch.num_cliprects = batchbuffer32_t.num_cliprects;
		batch.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)batchbuffer32_t.cliprects;
	} else
		DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
			sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);
	/* NOTE(review): the cliprect readability pre-check below is
	 * disabled in this port; i915_emit_box() still fails with
	 * EFAULT on a bad pointer.
	 */
	/*

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);
		*/

	ret = i915_dispatch_batchbuffer(dev, &batch, mode);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}
637 
/*
 * DRM_I915_CMDBUFFER ioctl: validate and copy a user command buffer
 * into the ring (unlike the batchbuffer path, the commands are
 * streamed rather than chained to).  32-bit callers are handled via
 * the ILP32 data-model conversion.  On success the last completed
 * sequence number (status page dword 5) is mirrored into the SAREA.
 */
/*ARGSUSED*/
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	/* Copy in the request, widening the 32-bit layout if needed. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_cmdbuffer32_t cmdbuffer32_t;

		DRM_COPY_FROM_USER_IOCTL(cmdbuffer32_t,
			(drm_i915_cmdbuffer32_t __user *) data,
			sizeof (drm_i915_cmdbuffer32_t));

		cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
		cmdbuf.sz = cmdbuffer32_t.sz;
		cmdbuf.DR1 = cmdbuffer32_t.DR1;
		cmdbuf.DR4 = cmdbuffer32_t.DR4;
		cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
		cmdbuf.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)cmdbuffer32_t.cliprects;
	} else
		DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
			sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);
	/* NOTE(review): the cliprect readability pre-check below is
	 * disabled in this port; i915_emit_box() still fails with
	 * EFAULT on a bad pointer.
	 */
	/*

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}
	*/

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf, mode);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
691 
692 static int i915_do_cleanup_pageflip(drm_device_t * dev)
693 {
694 	drm_i915_private_t *dev_priv = dev->dev_private;
695 
696 	DRM_DEBUG("i915_do_cleanup_pageflip\n");
697 	if (dev_priv->current_page != 0)
698 		(void) i915_dispatch_flip(dev);
699 
700 	return 0;
701 }
702 
/*
 * DRM_I915_FLIP ioctl: queue an asynchronous page flip.  Caller must
 * hold the DRM lock.
 */
/*ARGSUSED*/
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}
714 
715 /*ARGSUSED*/
716 static int i915_getparam(DRM_IOCTL_ARGS)
717 {
718 	DRM_DEVICE;
719 	drm_i915_private_t *dev_priv = dev->dev_private;
720 	drm_i915_getparam_t param;
721 	int value;
722 
723 	if (!dev_priv) {
724 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
725 		return DRM_ERR(EINVAL);
726 	}
727 
728 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
729 		drm_i915_getparam32_t getparam32_t;
730 
731 		DRM_COPY_FROM_USER_IOCTL(getparam32_t,
732 			(drm_i915_getparam32_t __user *) data,
733 			sizeof (drm_i915_getparam32_t));
734 
735 		param.param = getparam32_t.param;
736 		param.value = (int __user *)(uintptr_t)getparam32_t.value;
737 	} else
738 		DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
739 			sizeof(param));
740 
741 	switch (param.param) {
742 	case I915_PARAM_IRQ_ACTIVE:
743 		value = dev->irq ? 1 : 0;
744 		break;
745 	case I915_PARAM_ALLOW_BATCHBUFFER:
746 		value = dev_priv->allow_batchbuffer ? 1 : 0;
747 		break;
748 	default:
749 		DRM_ERROR("Unkown parameter %d\n", param.param);
750 		return DRM_ERR(EINVAL);
751 	}
752 
753 	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
754 		DRM_ERROR("i915_getparam failed\n");
755 		return DRM_ERR(EFAULT);
756 	}
757 	return 0;
758 }
759 
760 /*ARGSUSED*/
761 static int i915_setparam(DRM_IOCTL_ARGS)
762 {
763 	DRM_DEVICE;
764 	drm_i915_private_t *dev_priv = dev->dev_private;
765 	drm_i915_setparam_t param;
766 
767 	if (!dev_priv) {
768 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
769 		return DRM_ERR(EINVAL);
770 	}
771 
772 	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
773 				 sizeof(param));
774 
775 	switch (param.param) {
776 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
777 		dev_priv->use_mi_batchbuffer_start = param.value;
778 		break;
779 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
780 		dev_priv->tex_lru_log_granularity = param.value;
781 		break;
782 	case I915_SETPARAM_ALLOW_BATCHBUFFER:
783 		dev_priv->allow_batchbuffer = param.value;
784 		break;
785 	default:
786 		DRM_ERROR("unknown parameter %d\n", param.param);
787 		return DRM_ERR(EINVAL);
788 	}
789 
790 	return 0;
791 }
792 
793 /*ARGSUSED*/
794 int i915_driver_load(drm_device_t *dev, unsigned long flags)
795 {
796 	/* i915 has 4 more counters */
797 	dev->counters += 4;
798 	dev->types[6] = _DRM_STAT_IRQ;
799 	dev->types[7] = _DRM_STAT_PRIMARY;
800 	dev->types[8] = _DRM_STAT_SECONDARY;
801 	dev->types[9] = _DRM_STAT_DMA;
802 
803 	return 0;
804 }
805 
806 void i915_driver_lastclose(drm_device_t * dev)
807 {
808 	if (dev->dev_private) {
809 		drm_i915_private_t *dev_priv = dev->dev_private;
810 		i915_mem_takedown(&(dev_priv->agp_heap));
811 	}
812 	(void) i915_dma_cleanup(dev);
813 }
814 
815 void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
816 {
817 	if (dev->dev_private) {
818 		drm_i915_private_t *dev_priv = dev->dev_private;
819 		if (dev_priv->page_flipping) {
820 		(void) i915_do_cleanup_pageflip(dev);
821 		}
822 		i915_mem_release(dev, filp, dev_priv->agp_heap);
823 	}
824 }
825 
826 extern drm_ioctl_desc_t i915_ioctls[];
827 
828 void i915_set_ioctl_desc(int n, drm_ioctl_t * func,
829 	    int auth_needed, int root_only, char *desc)
830 {
831 	i915_ioctls[n].func = func;
832 	i915_ioctls[n].auth_needed = auth_needed;
833 	i915_ioctls[n].root_only = root_only;
834 	i915_ioctls[n].desc = desc;
835 }
836 void
837 i915_init_ioctl_arrays(void)
838 {
839 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT),
840 	    i915_dma_init, 1, 1, "i915_dma_init");
841 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLUSH),
842 	    i915_flush_ioctl, 1, 0, "i915_flush_ioctl");
843 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FLIP),
844 	    i915_flip_bufs, 1, 0, "i915_flip_bufs");
845 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_BATCHBUFFER),
846 	    i915_batchbuffer, 1, 0, "i915_batchbuffer");
847 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_EMIT),
848 	    i915_irq_emit, 1, 0, " i915_irq_emit");
849 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_IRQ_WAIT),
850 	    i915_irq_wait, 1, 0, "i915_irq_wait");
851 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_GETPARAM),
852 	    i915_getparam, 1, 0, "i915_getparam");
853 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_SETPARAM),
854 	    i915_setparam, 1, 1, "i915_setparam");
855 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_ALLOC),
856 	    i915_mem_alloc, 1, 0, "i915_mem_alloc");
857 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_FREE),
858 	    i915_mem_free, 1, 0, "i915_mem_free");
859 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_INIT_HEAP),
860 	    i915_mem_init_heap, 1, 1, "i915_mem_init_heap");
861 	i915_set_ioctl_desc(DRM_IOCTL_NR(DRM_I915_CMDBUFFER),
862 	    i915_cmdbuffer, 1, 0, "i915_cmdbuffer");
863 }
864 /**
865  * Determine if the device really is AGP or not.
866  *
867  * All Intel graphics chipsets are treated as AGP, even if they are really
868  * PCI-e.
869  *
870  * \param dev   The device to be tested.
871  *
872  * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
874  */
875 /*ARGSUSED*/
876 int i915_driver_device_is_agp(drm_device_t * dev)
877 {
878 	return 1;
879 }
880