/* BEGIN CSTYLED */

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"


#define MAX_NOPID ((u32)~0)

/*
 * These are the interrupts used by the driver
 */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

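/*
 * Unmask (enable) or mask (disable) the given bits in the cached IMR
 * value and write it back to the hardware, reading IMR afterwards to
 * flush the posted write.  Callers serialize updates to irq_mask_reg
 * with user_irq_lock.
 */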
static inline void
i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

/**
 * i915_get_pipe - return the pipe associated with a given plane
 * @dev: DRM device
 * @plane: plane to look for
 *
 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 * rather than a pipe number, since they may not always be equal.  This routine
 * maps the given @plane back to a pipe number.
 */
static int
i915_get_pipe(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 dspcntr;

	dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);

	return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
}

/**
 * i915_get_plane - return the plane associated with a given pipe
 * @dev: DRM device
 * @pipe: pipe to look for
 *
 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
 * rather than a pipe number, since they may not always be equal.  This routine
 * maps the given @pipe back to a plane number.
 */
static int
i915_get_plane(struct drm_device *dev, int pipe)
{
	if (i915_get_pipe(dev, 0) == pipe)
		return 0;
	return 1;
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/**
 * Emit a synchronous flip.
 *
 * This function must be called with the drawable spinlock held.
 */
static void
i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
			 int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u16 x1, y1, x2, y2;
	int pf_planes = 1 << plane;

	DRM_SPINLOCK_ASSERT(&dev->drw_lock);

	/* If the window is visible on the other plane, we have to flip on that
	 * plane as well.
	 */
	if (plane == 1) {
		x1 = sarea_priv->planeA_x;
		y1 = sarea_priv->planeA_y;
		x2 = x1 + sarea_priv->planeA_w;
		y2 = y1 + sarea_priv->planeA_h;
	} else {
		x1 = sarea_priv->planeB_x;
		y1 = sarea_priv->planeB_y;
		x2 = x1 + sarea_priv->planeB_w;
		y2 = y1 + sarea_priv->planeB_h;
	}

	if (x2 > 0 && y2 > 0) {
		int i, num_rects = drw->num_rects;
		struct drm_clip_rect *rect = drw->rects;

		for (i = 0; i < num_rects; i++)
			if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
			      rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
				pf_planes = 0x3;

				break;
			}
	}

	i915_dispatch_flip(dev, pf_planes, 1);
}

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct list_head *list, *tmp, hits, *hit;
	int nhits, slice[2], upper[2], lower[2], i, num_pages;
	unsigned counter[2];
	struct drm_drawable_info *drw;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 cpp = dev_priv->cpp, offsets[3];
	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB)
			     : XY_SRC_COPY_BLT_CMD;
	u32 src_pitch = sarea_priv->pitch * cpp;
	u32 dst_pitch = sarea_priv->pitch * cpp;
	/* COPY rop (0xcc), map cpp to magic color depth constants */
	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
	RING_LOCALS;

	if (IS_I965G(dev) && sarea_priv->front_tiled) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch >>= 2;
	}
	if (IS_I965G(dev) && sarea_priv->back_tiled) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch >>= 2;
	}

	counter[0] = drm_vblank_count(dev, 0);
	counter[1] = drm_vblank_count(dev, 1);

	INIT_LIST_HEAD(&hits);

	nhits = 0;

	/* No irqsave/restore necessary.  This tasklet may be run in an
	 * interrupt context or normal context, but we don't have to worry
	 * about getting interrupted by something acquiring the lock, because
	 * we are the interrupt context thing that acquires the lock.
	 */
	DRM_SPINLOCK(&dev_priv->swaps_lock);

	/* Find buffer swaps scheduled for this vertical blank */
	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		drm_i915_vbl_swap_t *vbl_swap =
			list_entry(list, drm_i915_vbl_swap_t, head);
		int pipe = i915_get_pipe(dev, vbl_swap->plane);

		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
			continue;

		list_del(list);
		dev_priv->swaps_pending--;
		drm_vblank_put(dev, pipe);

		DRM_SPINUNLOCK(&dev_priv->swaps_lock);
		DRM_SPINLOCK(&dev->drw_lock);

		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

		if (!drw) {
			DRM_SPINUNLOCK(&dev->drw_lock);
			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
			DRM_SPINLOCK(&dev_priv->swaps_lock);
			continue;
		}

		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_cmp =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_drawable_info *drw_cmp =
				drm_get_drawable_info(dev, swap_cmp->drw_id);

			if (drw_cmp &&
			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
				list_add_tail(list, hit);
				break;
			}
		}

		DRM_SPINUNLOCK(&dev->drw_lock);

		/* List of hits was empty, or we reached the end of it */
		if (hit == &hits)
			list_add_tail(list, hits.prev);

		nhits++;

		DRM_SPINLOCK(&dev_priv->swaps_lock);
	}

	DRM_SPINUNLOCK(&dev_priv->swaps_lock);

	if (nhits == 0) {
		return;
	}

	i915_kernel_lost_context(dev);

	upper[0] = upper[1] = 0;
	slice[0] = max(sarea_priv->planeA_h / nhits, 1);
	slice[1] = max(sarea_priv->planeB_h / nhits, 1);
	lower[0] = sarea_priv->planeA_y + slice[0];
	lower[1] = sarea_priv->planeB_y + slice[1];

	offsets[0] = sarea_priv->front_offset;
	offsets[1] = sarea_priv->back_offset;
	offsets[2] = sarea_priv->third_offset;
	num_pages = sarea_priv->third_handle ? 3 : 2;

	DRM_SPINLOCK(&dev->drw_lock);

	/* Emit blits for buffer swaps, partitioning both outputs into as many
	 * slices as there are buffer swaps scheduled in order to avoid tearing
	 * (based on the assumption that a single buffer swap would always
	 * complete before scanout starts).
	 */
	for (i = 0; i++ < nhits;
	     upper[0] = lower[0], lower[0] += slice[0],
	     upper[1] = lower[1], lower[1] += slice[1]) {
		int init_drawrect = 1;

		if (i == nhits)
			lower[0] = lower[1] = sarea_priv->height;

		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_hit =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_clip_rect *rect;
			int num_rects, plane, front, back;
			unsigned short top, bottom;

			drw = drm_get_drawable_info(dev, swap_hit->drw_id);

			if (!drw)
				continue;

			plane = swap_hit->plane;

			if (swap_hit->flip) {
				i915_dispatch_vsync_flip(dev, drw, plane);
				continue;
			}

			if (init_drawrect) {
				int width  = sarea_priv->width;
				int height = sarea_priv->height;
				if (IS_I965G(dev)) {
					BEGIN_LP_RING(4);
					OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
					OUT_RING(0);
					OUT_RING(((width - 1) & 0xffff) |
						 ((height - 1) << 16));
					OUT_RING(0);
					ADVANCE_LP_RING();
				} else {
					BEGIN_LP_RING(6);
					OUT_RING(GFX_OP_DRAWRECT_INFO);
					OUT_RING(0);
					OUT_RING(0);
					OUT_RING(((width - 1) & 0xffff) |
						 ((height - 1) << 16));
					OUT_RING(0);
					OUT_RING(0);
					ADVANCE_LP_RING();
				}

				sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

				init_drawrect = 0;
			}

			rect = drw->rects;
			top = upper[plane];
			bottom = lower[plane];

			front = (dev_priv->sarea_priv->pf_current_page >>
				 (2 * plane)) & 0x3;
			back = (front + 1) % num_pages;

			for (num_rects = drw->num_rects; num_rects--; rect++) {
				int y1 = max(rect->y1, top);
				int y2 = min(rect->y2, bottom);

				if (y1 >= y2)
					continue;

				BEGIN_LP_RING(8);
				OUT_RING(cmd);
				OUT_RING(ropcpp | dst_pitch);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING((y2 << 16) | rect->x2);
				OUT_RING(offsets[front]);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING(src_pitch);
				OUT_RING(offsets[back]);
				ADVANCE_LP_RING();
			}
		}
	}

	DRM_SPINUNLOCK(&dev->drw_lock);

	list_for_each_safe(hit, tmp, &hits) {
		drm_i915_vbl_swap_t *swap_hit =
			list_entry(hit, drm_i915_vbl_swap_t, head);

		list_del(hit);

		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
	}
}

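/**
 * i915_get_vblank_counter - fetch the hardware frame count for a plane
 * @dev: DRM device
 * @plane: plane whose pipe frame counter is wanted
 *
 * Combines the high and low frame count registers of the pipe driving
 * @plane into a single 24-bit value, re-reading the high field until it
 * is stable across the low read.  Returns 0 if the pipe is disabled.
 */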
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;
	int pipe;

	pipe = i915_get_pipe(dev, plane);
	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n",
		    pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	/*
	 * The counter may be reset by another driver (e.g. the 2D driver).
	 * When it goes backwards we have no way to know whether it wrapped
	 * or was reset, so make a rough guess.
	 */
	if (count < dev->last_vblank[pipe] &&
	    dev->last_vblank[pipe] < dev->max_vblank_count / 2)
		dev->last_vblank[pipe] = 0;
	return count;
}

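/**
 * Main interrupt handler.  Acknowledges IIR, forwards vblank events on
 * pipes A and B to the DRM core, wakes up waiters on user interrupts
 * and, if buffer swaps are pending, schedules the vblank tasklet.
 */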
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	drm_device_t *dev = (drm_device_t *) (void *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir;
	u32 pipea_stats = 0, pipeb_stats = 0;
	int vblank = 0;

	iir = I915_READ(IIR);

	atomic_inc(&dev_priv->irq_received);

	if (iir == 0) {
		return IRQ_NONE;
	}

	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		pipea_stats = I915_READ(PIPEASTAT);

		/* The vblank interrupt gets enabled even if we didn't ask for
		   it, so make sure it's shut down again */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS))
		{
			vblank++;
			drm_handle_vblank(dev, i915_get_plane(dev, 0));
		}

		I915_WRITE(PIPEASTAT, pipea_stats);
	}
	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
		pipeb_stats = I915_READ(PIPEBSTAT);

		/* The vblank interrupt gets enabled even if we didn't ask for
		   it, so make sure it's shut down again */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS))
		{
			vblank++;
			drm_handle_vblank(dev, i915_get_plane(dev, 1));
		}

		I915_WRITE(PIPEBSTAT, pipeb_stats);
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	I915_WRITE(IIR, iir);

	(void) I915_READ(IIR); /* Flush posted writes */

	if (iir & I915_USER_INTERRUPT) {
		DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
		i915_fence_handler(dev);
#endif
	}

	if (vblank) {
		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
	}

	return IRQ_HANDLED;
}

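/**
 * Advance the breadcrumb and emit MI_USER_INTERRUPT on the ring,
 * returning the current breadcrumb sequence number.  Callers must hold
 * the hardware lock, since this touches the ring.
 */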
int i915_emit_irq(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	i915_emit_breadcrumb(dev);

	BEGIN_LP_RING(2);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

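/**
 * Take a reference on the user interrupt; the first reference unmasks
 * I915_USER_INTERRUPT (provided interrupts are enabled).
 */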
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
	spin_lock(&dev_priv->user_irq_lock);
	if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&dev_priv->user_irq_lock);
}

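/**
 * Drop a reference on the user interrupt; the last reference masks
 * I915_USER_INTERRUPT again.
 */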
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
	spin_lock(&dev_priv->user_irq_lock);
	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&dev_priv->user_irq_lock);
}

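/**
 * Wait (for up to three seconds) until the breadcrumb in the hardware
 * status page reaches @irq_nr, using the user interrupt as the wakeup.
 */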
static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
		return 0;
	}

	DRM_DEBUG("i915_wait_irq: irq_nr=%d breadcrumb=%d\n", irq_nr,
	    READ_BREADCRUMB(dev_priv));
	i915_user_irq_on(dev_priv);
	DRM_WAIT_ON(ret, &dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_off(dev_priv);

	if (ret == EBUSY || ret == EINTR) {
		DRM_DEBUG("%d: EBUSY -- rec: %d emitted: %d\n",
			  ret,
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	return ret;
}

/* Needs the lock as it touches the ring.
 */
/*ARGSUSED*/
int i915_irq_emit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t emit;
	int result;

	LOCK_TEST_WITH_RETURN(dev, fpriv);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_i915_irq_emit32_t irq_emit32;

		DRM_COPYFROM_WITH_RETURN(&irq_emit32,
			(drm_i915_irq_emit32_t __user *) data,
			sizeof (drm_i915_irq_emit32_t));
		emit.irq_seq = (int __user *)(uintptr_t)irq_emit32.irq_seq;
	} else
		DRM_COPYFROM_WITH_RETURN(&emit,
		    (drm_i915_irq_emit_t __user *) data, sizeof(emit));

	result = i915_emit_irq(dev);

	if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return (EFAULT);
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
/*ARGSUSED*/
int i915_irq_wait(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t irqwait;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return (EINVAL);
	}

	DRM_COPYFROM_WITH_RETURN(&irqwait,
	    (drm_i915_irq_wait_t __user *) data, sizeof(irqwait));

	return i915_wait_irq(dev, irqwait.irq_seq);
}

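/**
 * Enable vblank interrupts for the pipe driving @plane: select the
 * start-of-vblank (965) or vblank (older chips) enable bit in the pipe
 * status register, clear any stale status, and unmask the pipe event
 * interrupt.
 */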
int i915_enable_vblank(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe = i915_get_pipe(dev, plane);
	u32	pipestat_reg = 0;
	u32	mask_reg = 0;
	u32	pipestat;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
		break;
	default:
		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
			  pipe);
		break;
	}

	if (pipestat_reg)
	{
		pipestat = I915_READ (pipestat_reg);
		/*
		 * Older chips don't have the start-of-vblank interrupt,
		 * so fall back to the plain vblank interrupt there.
		 */
		if (IS_I965G (dev))
			pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
		else
			pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
		/*
		 * Clear any pending status
		 */
		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
			     PIPE_VBLANK_INTERRUPT_STATUS);
		I915_WRITE(pipestat_reg, pipestat);
	}
	DRM_SPINLOCK(&dev_priv->user_irq_lock);
	i915_enable_irq(dev_priv, mask_reg);
	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

	return 0;
}

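/**
 * Disable vblank interrupts for the pipe driving @plane: mask the pipe
 * event interrupt and clear the vblank enable bits (and any pending
 * status) in the pipe status register.
 */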
void i915_disable_vblank(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe = i915_get_pipe(dev, plane);
	u32	pipestat_reg = 0;
	u32	mask_reg = 0;
	u32	pipestat;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
		break;
	default:
		DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
			  pipe);
		break;
	}

	DRM_SPINLOCK(&dev_priv->user_irq_lock);
	i915_disable_irq(dev_priv, mask_reg);
	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

	if (pipestat_reg)
	{
		pipestat = I915_READ (pipestat_reg);
		pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
			      PIPE_VBLANK_INTERRUPT_ENABLE);
		/*
		 * Clear any pending status
		 */
		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
			     PIPE_VBLANK_INTERRUPT_STATUS);
		I915_WRITE(pipestat_reg, pipestat);
		(void) I915_READ(pipestat_reg);
	}
}

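/*
 * Mask every interrupt source in IMR and enable the sources the driver
 * uses through IER; individual sources are unmasked later via
 * i915_enable_irq().
 */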
static void i915_enable_interrupt (drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->irq_mask_reg = 0xffffffff;
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	(void) I915_READ (IER);

	dev_priv->irq_enabled = 1;
}

/* Set the vblank monitor pipe
 */
/*ARGSUSED*/
int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return (-EINVAL);
	}

	return (0);
}

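/* Report the pipes on which vblank interrupts can be generated.
 */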
/*ARGSUSED*/
int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t pipe;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_COPYFROM_WITH_RETURN(&pipe, (drm_i915_vblank_pipe_t __user *)data,
	    sizeof (pipe));

	pipe.pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	/* Return the supported pipes to user space */
	if (DRM_COPY_TO_USER((drm_i915_vblank_pipe_t __user *)data, &pipe,
	    sizeof (pipe))) {
		DRM_ERROR("copy_to_user\n");
		return (EFAULT);
	}

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
/*ARGSUSED*/
int i915_vblank_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t swapbuf, *swap = &swapbuf;
	drm_i915_vbl_swap_t *vbl_swap;
	unsigned int pipe, seqtype, curseq, plane;
	struct list_head *list;
	int ret;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	/* Copy the user's request into the local struct, not into the pointer */
	DRM_COPYFROM_WITH_RETURN(swap, (drm_i915_vblank_swap_t __user *)data,
	    sizeof (*swap));

	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
			     _DRM_VBLANK_FLIP)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
	pipe = i915_get_pipe(dev, plane);

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	/* It makes no sense to schedule a swap for a drawable that doesn't have
	 * valid information at this point. E.g. this could mean that the X
	 * server is too old to push drawable information to the DRM, in which
	 * case all such swaps would become ineffective.
	 */
	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	/*
	 * We take the ref here and put it when the swap actually completes
	 * in the tasklet.
	 */
	ret = drm_vblank_get(dev, pipe);
	if (ret)
		return ret;
	curseq = drm_vblank_count(dev, pipe);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			drm_vblank_put(dev, pipe);
			return -EINVAL;
		}
	}

	if (swap->seqtype & _DRM_VBLANK_FLIP) {
		swap->sequence--;

		if ((curseq - swap->sequence) <= (1<<23)) {
			struct drm_drawable_info *drw;

			LOCK_TEST_WITH_RETURN(dev, fpriv);

			spin_lock_irqsave(&dev->drw_lock, irqflags);

			drw = drm_get_drawable_info(dev, swap->drawable);

			if (!drw) {
				spin_unlock_irqrestore(&dev->drw_lock,
				    irqflags);
				DRM_DEBUG("Invalid drawable ID %d\n",
					  swap->drawable);
				drm_vblank_put(dev, pipe);
				return -EINVAL;
			}

			i915_dispatch_vsync_flip(dev, drw, plane);

			spin_unlock_irqrestore(&dev->drw_lock, irqflags);

			drm_vblank_put(dev, pipe);
			return 0;
		}
	}

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_swap->drw_id == swap->drawable &&
		    vbl_swap->plane == plane &&
		    vbl_swap->sequence == swap->sequence) {
			vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	if (dev_priv->swaps_pending >= 100) {
		DRM_DEBUG("Too many swaps queued\n");
		drm_vblank_put(dev, pipe);
		return -EBUSY;
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		drm_vblank_put(dev, pipe);
		return -ENOMEM;
	}

	DRM_DEBUG("vbl_swap\n");

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->plane = plane;
	vbl_swap->sequence = swap->sequence;
	vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);

	if (vbl_swap->flip)
		swap->sequence++;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}

/* drm_dma.h hooks
 */
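/* Mask and disable all interrupts before the handler is installed. */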
int i915_driver_irq_preinstall(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv->mmio_map)
		return -EINVAL;

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	return 0;
}

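/*
 * Finish interrupt setup once the handler is installed: initialize the
 * swap list and wait queue, advertise vblank support on both pipes and
 * turn the chip interrupt sources on.
 */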
void i915_driver_irq_postinstall(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	dev_priv->user_irq_refcount = 0;
	dev_priv->irq_mask_reg = 0xffffffff;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	i915_enable_interrupt(dev);

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
	return;
}

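/*
 * Tear down interrupt state: mask everything and acknowledge any status
 * bits still pending in the pipe status and IIR registers.
 */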
void i915_driver_irq_uninstall(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 temp;

	if ((!dev_priv) || (dev_priv->irq_enabled == 0))
		return;

	dev_priv->vblank_pipe = 0;
	dev_priv->irq_enabled = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	temp = I915_READ(PIPEASTAT);
	I915_WRITE(PIPEASTAT, temp);
	temp = I915_READ(PIPEBSTAT);
	I915_WRITE(PIPEBSTAT, temp);
	temp = I915_READ(IIR);
	I915_WRITE(IIR, temp);
}