xref: /titanic_41/usr/src/uts/intel/io/drm/i915_dma.c (revision b509e89b2befbaa42939abad9da1d7f5a8c6aaae)
1 /* BEGIN CSTYLED */
2 
3 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
4  */
5 /*
6  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7  * All Rights Reserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sub license, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the
18  * next paragraph) shall be included in all copies or substantial portions
19  * of the Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28  *
29  */
30 
31 /*
32  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
33  * Use is subject to license terms.
34  */
35 
36 #include "drmP.h"
37 #include "drm.h"
38 #include "i915_drm.h"
39 #include "i915_drv.h"
40 
41 
42 
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/*
 * Poll the ring until at least 'n' bytes are free.  Returns 0 on
 * success or EBUSY if the ring appears stalled.  The loop counter is
 * reset whenever the ring head or the active-head register moves, so
 * the (100000 x 10us) budget only expires on a genuine stall.
 */
/*ARGSUSED*/
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* Free bytes; the tail+8 guard mirrors the Size-8 limit
		 * used by i915_quiescent(). */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Hardware made progress: restart the timeout budget. */
		if (ring->head != last_head)
			i = 0;

		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		DRM_UDELAY(10);
	}

	return (EBUSY);
}
81 
82 int i915_init_hardware_status(drm_device_t *dev)
83 {
84        drm_i915_private_t *dev_priv = dev->dev_private;
85        drm_dma_handle_t *dmah;
86 
87        /* Program Hardware Status Page */
88        dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff,1);
89 
90        if (!dmah) {
91                DRM_ERROR("Can not allocate hardware status page\n");
92                return -ENOMEM;
93        }
94 
95        dev_priv->status_page_dmah = dmah;
96        dev_priv->hw_status_page = (void *)dmah->vaddr;
97        dev_priv->dma_status_page = dmah->paddr;
98 
99        (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
100 
101        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
102        DRM_DEBUG("Enabled hardware status page\n");
103        return 0;
104 }
105 
106 void i915_free_hardware_status(drm_device_t *dev)
107 {
108        drm_i915_private_t *dev_priv = dev->dev_private;
109 	if (!I915_NEED_GFX_HWS(dev)) {
110 		if (dev_priv->status_page_dmah) {
111 			drm_pci_free(dev, dev_priv->status_page_dmah);
112 			dev_priv->status_page_dmah = NULL;
113 			/* Need to rewrite hardware status page */
114 			I915_WRITE(HWS_PGA, 0x1ffff000);
115 		}
116        	} else {
117 		if (dev_priv->status_gfx_addr) {
118 			dev_priv->status_gfx_addr = 0;
119 			drm_core_ioremapfree(&dev_priv->hws_map, dev);
120 			I915_WRITE(HWS_PGA, 0x1ffff000);
121 		}
122 	}
123 }
124 
125 #if I915_RING_VALIDATE
126 /**
127  * Validate the cached ring tail value
128  *
129  * If the X server writes to the ring and DRM doesn't
130  * reload the head and tail pointers, it will end up writing
131  * data to the wrong place in the ring, causing havoc.
132  */
/*
 * Compare the cached ring tail against the hardware PRB0_TAIL and log
 * a mismatch.  NOTE(review): only the tail is compared, although the
 * message also prints head values -- confirm whether a head check was
 * intended as well.
 */
void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
       drm_i915_private_t *dev_priv = dev->dev_private;
       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
       u32     tail = I915_READ(PRB0_TAIL) & HEAD_ADDR;
       u32     head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

       if (tail != ring->tail) {
               DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
                         func, line,
                         ring->head, head, ring->tail, tail);
       }
}
146 #endif
147 
148 void i915_kernel_lost_context(drm_device_t * dev)
149 {
150 	drm_i915_private_t *dev_priv = dev->dev_private;
151 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
152 
153        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
154        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
155 	ring->space = ring->head - (ring->tail + 8);
156 	if (ring->space < 0)
157 		ring->space += ring->Size;
158 
159 }
160 
/*
 * Tear down DMA state: disable interrupts, unmap the ring buffer,
 * release the hardware status page, and drop cached sarea/mmio
 * pointers.  All frees are guarded, so repeated calls are harmless.
 * Always returns 0.
 */
static int i915_dma_cleanup(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		(void) drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	i915_free_hardware_status(dev);

	/* These point into maps owned elsewhere; just forget them. */
	dev_priv->sarea = NULL;
	dev_priv->sarea_priv = NULL;
	dev_priv->mmio_map = NULL;

	return 0;
}
188 
/*
 * Set up DMA for the device from the userspace-provided init request:
 * locate the sarea and mmio maps, resolve sarea_priv, ioremap the ring
 * buffer, and initialize defaults (cpp, batchbuffers allowed, vblank
 * pipe A).  On any failure the partially-built state is torn down via
 * i915_dma_cleanup().  Returns 0, EINVAL, or ENOMEM.
 */
static int i915_initialize(drm_device_t * dev,
			   drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv =
	    (drm_i915_private_t *)dev->dev_private;

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		return (EINVAL);
	}

	/*
	 * mmio_map will be destroyed after DMA clean up.  We should not
	 * access mmio_map in suspend or resume process.
	 */

 	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);

	 if (!dev_priv->mmio_map) {
			dev->dev_private = (void *)dev_priv;
			(void) i915_dma_cleanup(dev);
			DRM_ERROR("can not find mmio map!\n");
			return (EINVAL);
	 }

       if (init->sarea_priv_offset)
               dev_priv->sarea_priv = (drm_i915_sarea_t *)
                       ((unsigned long) dev_priv->sarea->handle +
                        init->sarea_priv_offset);
       else {
               /* No sarea_priv for you! */
               dev_priv->sarea_priv = NULL;
        }

	if (init->ring_size != 0) {
		dev_priv->ring.Size = init->ring_size;
		/* NOTE(review): mask assumes ring_size is a power of two --
		 * confirm the caller enforces this. */
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = (u_offset_t)init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		drm_core_ioremap(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			(void) i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
			return (ENOMEM);
		}

		dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;
	}

	dev_priv->cpp = init->cpp;

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		(void) i915_init_hardware_status(dev);
	}

	/* Enable vblank on pipe A for older X servers
	*/
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}
275 
/*
 * Re-validate DMA state after a suspend/resume cycle and reprogram the
 * hardware status page register.  Does not reallocate anything; it
 * only checks that the sarea, ring mapping, and status page survived.
 * Returns 0, EINVAL, or ENOMEM.
 */
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("i915_dma_resume\n");

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return (EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return (ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return (EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Point HWS_PGA at whichever status-page variant is in use. */
	if (!I915_NEED_GFX_HWS(dev))
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	else
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
308 
309 /*ARGSUSED*/
310 static int i915_dma_init(DRM_IOCTL_ARGS)
311 {
312 	DRM_DEVICE;
313 	drm_i915_init_t init;
314 	int retcode = 0;
315 
316 	DRM_COPYFROM_WITH_RETURN(&init, (drm_i915_init_t *)data, sizeof(init));
317 
318 	switch (init.func) {
319 	case I915_INIT_DMA:
320 		retcode = i915_initialize(dev, &init);
321 		break;
322 	case I915_CLEANUP_DMA:
323 		retcode = i915_dma_cleanup(dev);
324 		break;
325 	case I915_RESUME_DMA:
326 		retcode = i915_dma_resume(dev);
327 		break;
328 	default:
329 		retcode = EINVAL;
330 		break;
331 	}
332 
333 	return retcode;
334 }
335 
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Returns the length in dwords of the command starting at 'cmd', or 0
 * for a disallowed/unknown command.  The length tells the caller where
 * the next command to check begins; a zero aborts the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int opcode = (cmd >> 29) & 0x7;

	if (opcode == 0x0) {
		/* MI commands: only NOOP and FLUSH are permitted. */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:	/* MI_NOOP */
		case 0x4:	/* MI_FLUSH */
			return 1;
		default:
			return 0;
		}
	}
	if (opcode == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */
	if (opcode != 0x3)
		return 0;	/* 0x1 reserved; 0x4-0x7 disallowed */

	/* 3D commands */
	if (((cmd >> 24) & 0x1f) <= 0x18)
		return 1;

	switch ((cmd >> 24) & 0x1f) {
	case 0x1c:
		return 1;
	case 0x1d:
		switch ((cmd >> 16) & 0xff) {
		case 0x3:
			return (cmd & 0x1f) + 2;
		case 0x4:
			return (cmd & 0xf) + 2;
		default:
			return (cmd & 0xffff) + 2;
		}
	case 0x1e:
		if (cmd & (1 << 23))
			return (cmd & 0xffff) + 1;
		return 1;
	case 0x1f:
		if ((cmd & (1 << 23)) == 0)	/* inline vertices */
			return (cmd & 0x1ffff) + 2;
		if (cmd & (1 << 17)) {	/* indirect random */
			if ((cmd & 0xffff) == 0)
				return 0;	/* unknown length, too hard */
			return (((cmd & 0xffff) + 1) / 2) + 1;
		}
		return 2;	/* indirect sequential */
	default:
		return 0;
	}
}
406 
/*
 * Thin wrapper over do_validate_cmd(); kept as a separate function as
 * a convenient hook for tracing validation results during debug.
 */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
415 
/*
 * Copy 'dwords' 32-bit commands from the user buffer into the ring,
 * validating each command header against the MI_BATCH_NON_SECURE rules
 * (validate_cmd()).  Returns 0 on success or EINVAL.
 */
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Reject buffers that could never fit in the ring at once. */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return (EINVAL);

	/* Emits are padded to an even number of dwords (see tail pad). */
	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return (EINVAL);


		/* sz = full command length in dwords; 0 means illegal.
		 * NOTE(review): the error returns in this loop leave the
		 * BEGIN_LP_RING above without a matching ADVANCE_LP_RING
		 * -- confirm the ring state is recovered by callers. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return (EINVAL);

		OUT_RING(cmd);

		/* Operand dwords are copied through without validation. */
		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return (EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
455 
/*
 * Emit a GFX_OP_DRAWRECT_INFO command for clip rectangle 'i' of the
 * user-supplied 'boxes' array.  DR1/DR4 are draw-rectangle control
 * values passed through from the batch/command buffer request.
 * Returns 0, EFAULT on copy-in failure, or EINVAL for a degenerate box.
 */
int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return (EFAULT);
	}

	/* Reject empty or inverted rectangles. */
	if (box.y2 <= box.y1 || box.x2 <= box.x1) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return (EINVAL);
	}

	if (IS_I965G(dev)) {
		/* 965 variant: 4 dwords, no DR1 or trailing pad. */
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
494 
495 /* XXX: Emitting the counter should really be moved to part of the IRQ
496  * emit.  For now, do it in both places:
497  */
498 
/*
 * Bump the driver's breadcrumb counter and emit a MI_STORE_DWORD_INDEX
 * that writes it into dword index 5 of the hardware status page, so
 * completion can be observed via READ_BREADCRUMB().
 */
void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	/* Counter wraps back to 1 once it exceeds BREADCRUMB_MASK. */
	if (++dev_priv->counter > BREADCRUMB_MASK) {
		 dev_priv->counter = 1;
		 DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;


	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

}
521 
522 
523 void i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
524 {
525 	drm_i915_private_t *dev_priv = dev->dev_private;
526 	uint32_t flush_cmd = MI_FLUSH;
527 	RING_LOCALS;
528 
529 	flush_cmd |= flush;
530 
531 	i915_kernel_lost_context(dev);
532 
533 	BEGIN_LP_RING(4);
534 	OUT_RING(flush_cmd);
535 	OUT_RING(0);
536 	OUT_RING(0);
537 	OUT_RING(0);
538 	ADVANCE_LP_RING();
539 }
540 
/*
 * Dispatch a validated command buffer, once per clip rectangle (or
 * once with no box if there are no cliprects).  The buffer size must
 * be dword-aligned.  Ends by emitting a breadcrumb.  Returns 0 or an
 * errno from the emit helpers.
 */
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	/* With no cliprects the buffer is still emitted exactly once. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)(void *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb( dev );
#ifdef I915_HAVE_FENCE
	/* Flush old fences every 256 breadcrumbs. */
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
579 
/*
 * Dispatch a hardware batch buffer, once per clip rectangle (or once
 * if there are none).  Start address and length must be 8-byte
 * aligned.  The batch is always emitted non-secure so the hardware
 * enforces its command restrictions.  Ends with a breadcrumb.
 */
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return (EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			/* 830/845: MI_BATCH_BUFFER with explicit end addr. */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			/* Others: MI_BATCH_BUFFER_START; the non-secure bit
			 * lives in a different dword on 965. */
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb( dev );

	return 0;
}
630 
/*
 * Emit the ring commands for a page flip on one plane.  Advances the
 * plane's 2-bit page index in sarea_priv->pf_current_page (cycling
 * front/back[/third]), computes the new display base from the chosen
 * buffer offset plus the plane's x/y pan, and emits a
 * CMD_OP_DISPLAYBUFFER_INFO (preceded by a wait-for-flip event unless
 * 'sync' is set).
 */
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = dev_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = dev_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = dev_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = dev_priv->sarea_priv->planeA_x;
		y = dev_priv->sarea_priv->planeA_y;
	} else {
		x = dev_priv->sarea_priv->planeB_x;
		y = dev_priv->sarea_priv->planeB_y;
	}

	/* Offset into the buffer for the current pan position. */
	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;


	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		(MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
					MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		(plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	/* Record the new page index for this plane. */
	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
682 
/*
 * Flip every plane selected in the 'planes' bitmask (bits 0-1), after
 * flushing render and instruction caches, then emit a breadcrumb.
 */
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		planes, dev_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	/* Flush old fences every 256 breadcrumbs (async flips only). */
	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif

}
704 
705 static int i915_quiescent(drm_device_t * dev)
706 {
707 	drm_i915_private_t *dev_priv = dev->dev_private;
708 	int ret;
709 	i915_kernel_lost_context(dev);
710 	ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
711 
712 	if (ret)
713 	{
714 		i915_kernel_lost_context (dev);
715 		DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
716 			   dev_priv->ring.head,
717 			   dev_priv->ring.tail,
718 			   dev_priv->ring.space);
719 	}
720 	return ret;
721 }
722 
723 /*ARGSUSED*/
724 static int i915_flush_ioctl(DRM_IOCTL_ARGS)
725 {
726 	DRM_DEVICE;
727 
728 	LOCK_TEST_WITH_RETURN(dev, fpriv);
729 
730 	return i915_quiescent(dev);
731 }
732 
/*
 * DRM_I915_BATCHBUFFER ioctl: copy in the request (with ILP32
 * conversion for 32-bit callers), dispatch the batch buffer, and
 * record the breadcrumb in the sarea.  Requires allow_batchbuffer and
 * the hardware lock.
 */
/*ARGSUSED*/
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return (EINVAL);
	}

	/* 32-bit userland on a 64-bit kernel: widen the pointer fields. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_batchbuffer32_t batchbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&batchbuffer32_t,
			(void *) data, sizeof (batchbuffer32_t));

		batch.start = batchbuffer32_t.start;
		batch.used = batchbuffer32_t.used;
		batch.DR1 = batchbuffer32_t.DR1;
		batch.DR4 = batchbuffer32_t.DR4;
		batch.num_cliprects = batchbuffer32_t.num_cliprects;
		batch.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)batchbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&batch, (void *) data,
			sizeof(batch));


	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d, counter %d\n",
		  batch.start, batch.used, batch.num_cliprects, dev_priv->counter);

	LOCK_TEST_WITH_RETURN(dev, fpriv);

/*
	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return (EFAULT);
*/


	ret = i915_dispatch_batchbuffer(dev, &batch);
	/* NOTE(review): sarea_priv may be NULL when i915_initialize() was
	 * given no sarea_priv_offset -- confirm this path cannot be
	 * reached in that configuration. */
	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	return ret;
}
784 
/*
 * DRM_I915_CMDBUFFER ioctl: copy in the request (with ILP32 conversion
 * for 32-bit callers), dispatch the validated command buffer, and
 * record the breadcrumb in the sarea.  Requires the hardware lock.
 */
/*ARGSUSED*/
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	/* 32-bit userland on a 64-bit kernel: widen the pointer fields. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_cmdbuffer32_t cmdbuffer32_t;

		DRM_COPYFROM_WITH_RETURN(&cmdbuffer32_t,
			(drm_i915_cmdbuffer32_t __user *) data,
			sizeof (drm_i915_cmdbuffer32_t));

		cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
		cmdbuf.sz = cmdbuffer32_t.sz;
		cmdbuf.DR1 = cmdbuffer32_t.DR1;
		cmdbuf.DR4 = cmdbuffer32_t.DR4;
		cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
		cmdbuf.cliprects = (drm_clip_rect_t __user *)
			(uintptr_t)cmdbuffer32_t.cliprects;
	} else
		DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
			sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, fpriv);

/*
	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return (EFAULT);
	}
*/

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
837 
/*
 * On last close, flip any plane that is not currently showing its
 * last page back so that further flips land it on the front buffer:
 * set each flipped plane's page index to num_pages - 1 and then issue
 * one more (wrapping) flip for those planes.
 */
static void i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

	DRM_DEBUG("i915_do_cleanup_pageflip\n");

	/* Each plane owns a 2-bit field in pf_current_page. */
	for (i = 0, planes = 0; i < 2; i++)
		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			dev_priv->sarea_priv->pf_current_page =
				(dev_priv->sarea_priv->pf_current_page &
				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

			planes |= 1 << i;
		}

	if (planes)
		i915_dispatch_flip(dev, planes, 0);

}
858 
859 /*ARGSUSED*/
860 static int i915_flip_bufs(DRM_IOCTL_ARGS)
861 {
862 	DRM_DEVICE;
863 	drm_i915_flip_t param;
864         DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_flip_t *) data,
865                                  sizeof(param));
866 
867 	DRM_DEBUG("i915_flip_bufs\n");
868 
869 	LOCK_TEST_WITH_RETURN(dev, fpriv);
870 	/* This is really planes */
871 	if (param.pipes & ~0x3) {
872 		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
873 			  param.pipes);
874 		return -EINVAL;
875 	}
876 	i915_dispatch_flip(dev, param.pipes, 0);
877 	return 0;
878 }
879 
/*
 * DRM_I915_GETPARAM ioctl: copy in the request (with ILP32 pointer
 * conversion), look up the parameter, and copy the integer result back
 * to the user-supplied address.  Returns 0, EINVAL for an unknown
 * parameter or missing init, or EFAULT on copy-out failure.
 */
/*ARGSUSED*/
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	/* 32-bit userland on a 64-bit kernel: widen the value pointer. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_getparam32_t getparam32_t;

		DRM_COPYFROM_WITH_RETURN(&getparam32_t,
			(drm_i915_getparam32_t __user *) data,
			sizeof (drm_i915_getparam32_t));

		param.param = getparam32_t.param;
		param.value = (int __user *)(uintptr_t)getparam32_t.value;
	} else
		DRM_COPYFROM_WITH_RETURN(&param,
		    (drm_i915_getparam_t *) data, sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return (EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("i915_getparam failed\n");
		return (EFAULT);
	}
	return 0;
}
930 
931 /*ARGSUSED*/
932 static int i915_setparam(DRM_IOCTL_ARGS)
933 {
934 	DRM_DEVICE;
935 	drm_i915_private_t *dev_priv = dev->dev_private;
936 	drm_i915_setparam_t param;
937 
938 	if (!dev_priv) {
939 		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
940 		return (EINVAL);
941 	}
942 
943 	DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_setparam_t *) data,
944 				 sizeof(param));
945 
946 	switch (param.param) {
947 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
948 		break;
949 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
950 		dev_priv->tex_lru_log_granularity = param.value;
951 		break;
952 	case I915_SETPARAM_ALLOW_BATCHBUFFER:
953 		dev_priv->allow_batchbuffer = param.value;
954 		break;
955 	default:
956 		DRM_ERROR("unknown parameter %d\n", param.param);
957 		return (EINVAL);
958 	}
959 
960 	return 0;
961 }
962 
/*
 * DRM_I915_HWS_ADDR ioctl: move the hardware status page to a
 * GFX-memory address supplied by userspace (chips that need
 * I915_NEED_GFX_HWS).  Maps 4K at the given offset inside the AGP
 * aperture, clears it, and programs HWS_PGA.  Returns 0, EINVAL, or
 * ENOMEM.
 */
/*ARGSUSED*/
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t hws;

	if (!I915_NEED_GFX_HWS(dev))
		return (EINVAL);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}
	DRM_COPYFROM_WITH_RETURN(&hws, (drm_i915_hws_addr_t __user *) data,
			sizeof(hws));
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr);

	/* Keep only the page-aligned bits of the supplied address. */
	dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
	DRM_DEBUG("set gfx_addr 0x%08x\n", dev_priv->status_gfx_addr);

	dev_priv->hws_map.offset =
	    (u_offset_t)dev->agp->agp_info.agpi_aperbase + hws.addr;
	dev_priv->hws_map.size = 4 * 1024; /* 4K pages */
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	DRM_DEBUG("set status page: i915_set_status_page: mapoffset 0x%llx\n",
	    dev_priv->hws_map.offset);
	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		/* Mapping failed: tear down DMA state entirely. */
		dev->dev_private = (void *)dev_priv;
		(void) i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return (ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->hws_map.dev_addr;

	(void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
1011 
/*
 * Driver load hook: register four extra statistics counters, allocate
 * and zero the per-device private state, initialize the interrupt
 * locks, and set up vblank handling.  Returns 0 or an errno.
 */
/*ARGSUSED*/
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return ENOMEM;

	(void) memset(dev_priv, 0, sizeof(drm_i915_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;

	mutex_init(&dev_priv->swaps_lock, "swap", MUTEX_DRIVER, NULL);
	mutex_init(&dev_priv->user_irq_lock, "userirq", MUTEX_DRIVER, NULL);

	ret = drm_vblank_init(dev, I915_NUM_PIPE);
	if (ret) {
		/* Unload destroys the locks and frees dev_priv. */
		(void) i915_driver_unload(dev);
		return ret;
	}

	return ret;
}
1044 
1045 int i915_driver_unload(struct drm_device *dev)
1046 {
1047        drm_i915_private_t *dev_priv = dev->dev_private;
1048 
1049        i915_free_hardware_status(dev);
1050 
1051 	DRM_FINI_WAITQUEUE(&dev_priv->irq_queue);
1052         mutex_destroy(&dev_priv->swaps_lock);
1053         mutex_destroy(&dev_priv->user_irq_lock);
1054 
1055 	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
1056 	    DRM_MEM_DRIVER);
1057 	dev->dev_private = NULL;
1058 
1059 	return 0;
1060 }
1061 
1062 
/*
 * Last-close hook: restore pending page flips, tear down the AGP
 * memory heap and (when built with buffer objects) sarea mappings,
 * then run the full DMA cleanup.  Tolerates being called before
 * dev_private exists.
 */
void i915_driver_lastclose(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* agp off can use this to get called before dev_priv */
	if (!dev_priv)
		return;

#ifdef I915_HAVE_BUFFER
	if (dev_priv->val_bufs) {
		vfree(dev_priv->val_bufs);
		dev_priv->val_bufs = NULL;
	}
#endif


	DRM_GETSAREA();
	if (dev_priv->sarea_priv)
		i915_do_cleanup_pageflip(dev);
	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));
#if defined(I915_HAVE_BUFFER)
	if (dev_priv->sarea_kmap.virtual) {
		drm_bo_kunmap(&dev_priv->sarea_kmap);
		dev_priv->sarea_kmap.virtual = NULL;
		/* The lock lived inside the sarea mapping; forget it. */
		dev->lock.hw_lock = NULL;
		dev->sigdata.lock = NULL;
	}

	if (dev_priv->sarea_bo) {
		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
		mutex_unlock(&dev->struct_mutex);
		dev_priv->sarea_bo = NULL;
	}
#endif
	(void) i915_dma_cleanup(dev);
}
1101 
1102 void i915_driver_preclose(drm_device_t * dev, drm_file_t *fpriv)
1103 {
1104 	if (dev->dev_private) {
1105 		drm_i915_private_t *dev_priv = dev->dev_private;
1106 		i915_mem_release(dev, fpriv, dev_priv->agp_heap);
1107 	}
1108 }
1109 
/*
 * Ioctl dispatch table, indexed by DRM_IOCTL_NR() of each i915 ioctl.
 * The flags (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY) are enforced by the
 * DRM core before the handler is invoked.
 */
drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] =
	    {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] =
	    {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] =
	    {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] =
	    {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] =
	    {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] =
	    {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] =
	    {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] =
	    {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] =
	    {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] =
	    {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] =
	    {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] =
	    {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] =
	    {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] =
	    {i915_vblank_pipe_set, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] =
	    {i915_vblank_pipe_get, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] =
	    {i915_vblank_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] =
	    {i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};

/* Number of entries in i915_ioctls, exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1148 
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
/*ARGSUSED*/
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}
1165 
/*
 * First-open hook: initialize the buffer-object driver when built with
 * I915_HAVE_BUFFER; otherwise nothing to do.  Always returns 0.
 */
/*ARGSUSED*/
int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}
1174 
1175