/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define MAX_NOPID ((u32)~0)

/** These are the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
				    I915_ASLE_INTERRUPT |		\
				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

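/*
 * Interrupt enabling/disabling works on a cached copy of the hardware
 * interrupt mask register (IMR): a set bit in IMR masks the
 * corresponding source, so enabling an interrupt clears its bit and
 * disabling sets it.  The throwaway read after each write is a posting
 * read that flushes the write to the hardware.
 */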
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 * Because this function must grab the ring mutex (dev->struct_mutex),
 * it can no longer run at soft irq time. We'll fix this when we do
 * the DRI2 swap buffer work.
 */
static void i915_vblank_tasklet(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	struct list_head *list, *tmp, hits, *hit;
	int nhits, nrects, slice[2], upper[2], lower[2], i;
	unsigned counter[2];
	struct drm_drawable_info *drw;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 cpp = dev_priv->cpp;
	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB)
			     : XY_SRC_COPY_BLT_CMD;
	u32 src_pitch = sarea_priv->pitch * cpp;
	u32 dst_pitch = sarea_priv->pitch * cpp;
	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
	RING_LOCALS;

	mutex_lock(&dev->struct_mutex);

	if (IS_I965G(dev) && sarea_priv->front_tiled) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch >>= 2;
	}
	if (IS_I965G(dev) && sarea_priv->back_tiled) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch >>= 2;
	}

	counter[0] = drm_vblank_count(dev, 0);
	counter[1] = drm_vblank_count(dev, 1);

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&hits);

	nhits = nrects = 0;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	/* Find buffer swaps scheduled for this vertical blank */
	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
		drm_i915_vbl_swap_t *vbl_swap =
			list_entry(list, drm_i915_vbl_swap_t, head);
		int pipe = vbl_swap->pipe;

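		/*
		 * Unsigned wraparound comparison: a difference larger
		 * than 2^23 means the swap's target sequence is still in
		 * the future, so leave it queued for a later vblank.
		 */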
		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
			continue;

		list_del(list);
		dev_priv->swaps_pending--;
		drm_vblank_put(dev, pipe);

		spin_unlock(&dev_priv->swaps_lock);
		spin_lock(&dev->drw_lock);

		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_cmp =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_drawable_info *drw_cmp =
				drm_get_drawable_info(dev, swap_cmp->drw_id);

			/* Make sure both drawables are still
			 * around and have some rectangles before
			 * we look inside to order them for the
			 * blts below.
			 */
			if (drw_cmp && drw_cmp->num_rects > 0 &&
			    drw && drw->num_rects > 0 &&
			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
				list_add_tail(list, hit);
				break;
			}
		}

		spin_unlock(&dev->drw_lock);

		/* List of hits was empty, or we reached the end of it */
		if (hit == &hits)
			list_add_tail(list, hits.prev);

		nhits++;

		spin_lock(&dev_priv->swaps_lock);
	}

	if (nhits == 0) {
		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	spin_unlock(&dev_priv->swaps_lock);

	i915_kernel_lost_context(dev);

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING(0);
		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(0);
		OUT_RING(0);
		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
		OUT_RING(0);

		ADVANCE_LP_RING();
	}

	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

	upper[0] = upper[1] = 0;
	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
	lower[0] = sarea_priv->pipeA_y + slice[0];
	lower[1] = sarea_priv->pipeB_y + slice[1];
	spin_lock(&dev->drw_lock);

	/* Emit blits for buffer swaps, partitioning both outputs into as many
	 * slices as there are buffer swaps scheduled in order to avoid tearing
	 * (based on the assumption that a single buffer swap would always
	 * complete before scanout starts).
	 */
	for (i = 0; i++ < nhits;
	     upper[0] = lower[0], lower[0] += slice[0],
	     upper[1] = lower[1], lower[1] += slice[1]) {
		if (i == nhits)
			lower[0] = lower[1] = sarea_priv->height;

		list_for_each(hit, &hits) {
			drm_i915_vbl_swap_t *swap_hit =
				list_entry(hit, drm_i915_vbl_swap_t, head);
			struct drm_clip_rect *rect;
			int num_rects, pipe;
			unsigned short top, bottom;

			drw = drm_get_drawable_info(dev, swap_hit->drw_id);

			/* The drawable may have been destroyed since
			 * the vblank swap was queued
			 */
			if (!drw)
				continue;

			rect = drw->rects;
			pipe = swap_hit->pipe;
			top = upper[pipe];
			bottom = lower[pipe];

			for (num_rects = drw->num_rects; num_rects--; rect++) {
				int y1 = max(rect->y1, top);
				int y2 = min(rect->y2, bottom);

				if (y1 >= y2)
					continue;

				BEGIN_LP_RING(8);

				OUT_RING(cmd);
				OUT_RING(ropcpp | dst_pitch);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING((y2 << 16) | rect->x2);
				OUT_RING(sarea_priv->front_offset);
				OUT_RING((y1 << 16) | rect->x1);
				OUT_RING(src_pitch);
				OUT_RING(sarea_priv->back_offset);

				ADVANCE_LP_RING();
			}
		}
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
	mutex_unlock(&dev->struct_mutex);

	list_for_each_safe(hit, tmp, &hits) {
		drm_i915_vbl_swap_t *swap_hit =
			list_entry(hit, drm_i915_vbl_swap_t, head);

		list_del(hit);

		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

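	/*
	 * The hardware frame counter is 24 bits wide, split between the
	 * frame-high register and a byte in the frame/pixel register;
	 * combining the two fields here matches the 0xffffff
	 * max_vblank_count set at IRQ postinstall time.
	 */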
	count = (high1 << 8) | low;

	return count;
}

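/*
 * Deferred vblank work: the swap blits in i915_vblank_tasklet() need
 * dev->struct_mutex and therefore cannot run from the interrupt
 * handler, so the handler schedules this work item instead.  If a
 * client holds the DRM hardware lock, emission is handed off to the
 * lock holder via dev->locked_tasklet_func and runs on unlock.
 */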
void
i915_vblank_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    vblank_work);
	struct drm_device *dev = dev_priv->dev;
	unsigned long irqflags;

	if (dev->lock.hw_lock == NULL) {
		i915_vblank_tasklet(dev);
		return;
	}

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	dev->locked_tasklet_func = i915_vblank_tasklet;
	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	/* Try to get the lock now, if this fails, the lock
	 * holder will execute the tasklet during unlock
	 */
	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
		return;

	dev->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	dev->locked_tasklet_func = NULL;
	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	i915_vblank_tasklet(dev);
	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir;
	u32 pipea_stats, pipeb_stats;
	int vblank = 0;

	atomic_inc(&dev_priv->irq_received);

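	/*
	 * With MSI, mask everything in IMR while the handler runs and
	 * restore the saved mask on the way out.  MSI interrupts are
	 * edge-triggered, so this presumably guarantees that events
	 * arriving mid-handler generate a fresh message once IMR is
	 * restored rather than being lost.
	 */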
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, ~0);
	iir = I915_READ(IIR);

	if (iir == 0) {
		if (dev->pdev->msi_enabled) {
			I915_WRITE(IMR, dev_priv->irq_mask_reg);
			(void) I915_READ(IMR);
		}
		return IRQ_NONE;
	}

	/*
	 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
	 * we may get extra interrupts.
	 */
	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		pipea_stats = I915_READ(PIPEASTAT);
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, 0);
		}

		I915_WRITE(PIPEASTAT, pipea_stats);
	}
	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
		pipeb_stats = I915_READ(PIPEBSTAT);
		/* Ack the event */
		I915_WRITE(PIPEBSTAT, pipeb_stats);

		/* The vblank interrupt gets enabled even if we didn't ask for
		   it, so make sure it's shut down again */
		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
					 PIPE_VBLANK_INTERRUPT_ENABLE);
		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
					PIPE_VBLANK_INTERRUPT_STATUS)) {
			vblank++;
			drm_handle_vblank(dev, 1);
		}

		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
			opregion_asle_intr(dev);
		I915_WRITE(PIPEBSTAT, pipeb_stats);
	}

	I915_WRITE(IIR, iir);
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IIR); /* Flush posted writes */

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	if (iir & I915_USER_INTERRUPT) {
		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	if (iir & I915_ASLE_INTERRUPT)
		opregion_asle_intr(dev);

	if (vblank && dev_priv->swaps_pending > 0)
		schedule_work(&dev_priv->vblank_work);

	return IRQ_HANDLED;
}

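/*
 * Emit a breadcrumb: bump dev_priv->counter, store it into the
 * hardware status page with MI_STORE_DWORD_INDEX (which is what
 * READ_BREADCRUMB reads back), and follow it with MI_USER_INTERRUPT so
 * the IRQ handler wakes anyone sleeping in i915_wait_irq().
 */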
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(6);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

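/*
 * The user interrupt is reference counted: it is unmasked when the
 * first waiter takes a reference and masked again when the last
 * reference is dropped.  dev_priv->user_irq_lock protects the refcount
 * and the cached IMR value.
 */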
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

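/*
 * Block until the breadcrumb in the status page reaches irq_nr, with a
 * three-second timeout.  A user-interrupt reference is held across the
 * wait so MI_USER_INTERRUPT events keep waking the queue.
 */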
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv) {
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
		}
		return 0;
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	pipestat_reg = 0;
	u32	pipestat;
	u32	interrupt = 0;
	unsigned long irqflags;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
		break;
	default:
		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
			  pipe);
		return 0;
	}

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	/* Enabling vblank events in IMR comes before PIPESTAT write, or
	 * there's a race where the PIPESTAT vblank bit gets set to 1, so
	 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
	 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
	 * IMR masks it.  It doesn't ever get set after we clear the masking
	 * in IMR because the ISR bit is edge, not level-triggered, on the
	 * OR of PIPESTAT bits.
	 */
	i915_enable_irq(dev_priv, interrupt);
	pipestat = I915_READ(pipestat_reg);
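	/* 965-class hardware signals vblank via the start-of-vblank
	 * event; older parts use the legacy vblank status bit.
	 */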
	if (IS_I965G(dev))
		pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
	else
		pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
	/* Clear any stale interrupt status */
	pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
		     PIPE_VBLANK_INTERRUPT_STATUS);
	I915_WRITE(pipestat_reg, pipestat);
	(void) I915_READ(pipestat_reg);	/* Posting read */
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	pipestat_reg = 0;
	u32	pipestat;
	u32	interrupt = 0;
	unsigned long irqflags;

	switch (pipe) {
	case 0:
		pipestat_reg = PIPEASTAT;
		interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
		break;
	case 1:
		pipestat_reg = PIPEBSTAT;
		interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
		break;
	default:
		DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
			  pipe);
		return;
	}

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	i915_disable_irq(dev_priv, interrupt);
	pipestat = I915_READ(pipestat_reg);
	pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
		      PIPE_VBLANK_INTERRUPT_ENABLE);
	/* Clear any stale interrupt status */
	pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
		     PIPE_VBLANK_INTERRUPT_STATUS);
	I915_WRITE(pipestat_reg, pipestat);
	(void) I915_READ(pipestat_reg);	/* Posting read */
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
	drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;
	int ret;

	if (!dev_priv || !dev_priv->sarea_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	/*
	 * We take the ref here and put it when the swap actually completes
	 * in the tasklet.
	 */
	ret = drm_vblank_get(dev, pipe);
	if (ret)
		return ret;
	curseq = drm_vblank_count(dev, pipe);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

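	/*
	 * An unsigned difference of at most 2^23 means the target
	 * sequence has already been reached: either retarget to the next
	 * vblank if the caller asked for NEXTONMISS, or report the miss.
	 */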
	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			drm_vblank_put(dev, pipe);
			return -EINVAL;
		}
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		drm_vblank_put(dev, pipe);
		return -ENOMEM;
	}

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap->sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_old->drw_id == swap->drawable &&
		    vbl_old->pipe == pipe &&
		    vbl_old->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			drm_vblank_put(dev, pipe);
			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	if (dev_priv->swaps_pending >= 10) {
		DRM_DEBUG("Too many swaps queued\n");
		DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
			  drm_vblank_count(dev, 0),
			  drm_vblank_count(dev, 1));

		list_for_each(list, &dev_priv->vbl_swaps.head) {
			vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
			DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
				  vbl_old->drw_id, vbl_old->pipe,
				  vbl_old->sequence);
		}
		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
		drm_vblank_put(dev, pipe);
		drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
		return -EBUSY;
	}

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}

/* drm_dma.h hooks
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
}

int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret, num_pipes = 2;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
	dev_priv->swaps_pending = 0;

	/* Set initial unmasked IRQs to just the selected vblank pipes. */
	dev_priv->irq_mask_reg = ~0;

	ret = drm_vblank_init(dev, num_pipes);
	if (ret)
		return ret;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;

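	/*
	 * IMR masks events from being latched into IIR, while IER
	 * controls whether latched IIR bits raise an actual interrupt;
	 * program the mask first, then enable the driver's interrupt set
	 * in IER and flush with a posting read.
	 */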
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	return 0;
}

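/*
 * Tear-down: mask everything again and write back any status bits
 * still set in the pipe status and IIR registers so they are acked
 * before the handler goes away.
 */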
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 temp;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	temp = I915_READ(PIPEASTAT);
	I915_WRITE(PIPEASTAT, temp);
	temp = I915_READ(PIPEBSTAT);
	I915_WRITE(PIPEBSTAT, temp);
	temp = I915_READ(IIR);
	I915_WRITE(IIR, temp);
}
885