/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

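/* The IMR-family registers (GTIMR, DEIMR, IMR) mask an interrupt when the
 * corresponding bit is set, so "enable" clears bits and "disable" sets
 * them.  dev_priv caches the current mask to avoid read-modify-write
 * cycles on the register, and the posting read flushes the write before
 * the caller proceeds.
 */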
void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

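/* Enable/disable bits in the main IMR; same masking convention as the
 * Ironlake helpers above (a set bit in IMR masks the interrupt).
 */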
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

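/* PIPESTAT registers keep the enable bits in the high 16 bits and the
 * corresponding (write-1-to-clear) status bits 16 positions below, which
 * is why enabling an event also writes (mask >> 16) to ack stale status.
 */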
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (IS_I965G(dev))
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	return count;
}

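/* G4X and newer parts expose a full hardware frame counter register, so
 * no high/low reconstruction is needed.
 */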
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
					"pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	if (mode_config->num_encoder) {
		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
		}
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

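/* Adjust the DRPS frequency step in response to a PCU event.  Note that,
 * judging from the clamping below, lower delay values appear to correspond
 * to higher frequencies, so max_delay is numerically the smallest value
 * cur_delay may take.
 */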
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;
}

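/* Top-level interrupt handler for Ironlake-class (PCH split) hardware: the
 * display engine (DEIIR), graphics (GTIIR) and south bridge (SDEIIR)
 * interrupt identity registers are read and dispatched in one pass, with
 * the master interrupt disabled while the IIRs are being cleared.
 */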
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_PIPE_NOTIFY) {
		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
		render_ring->irq_gem_seqno = seqno;
		trace_i915_gem_request_complete(dev, seqno);
		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);

	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) &&
	    (pch_iir & SDE_HOTPLUG_MASK)) {
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}

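/* Snapshot a GEM object into plain kernel pages.  This runs in atomic
 * context during error capture, hence the GFP_ATOMIC allocations and the
 * kmap_atomic copy with interrupts disabled around the KM_IRQ0 slot.
 */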
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	for (page = 0; page < page_count; page++) {
		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		unsigned long flags;

		if (d == NULL)
			goto unwind;
		local_irq_save(flags);
		s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
		memcpy(d, s, PAGE_SIZE);
		kunmap_atomic(s, KM_IRQ0);
		local_irq_restore(flags);
		dst->pages[page] = d;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error);
}

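/* If the ring dword at *ring is the MI_BATCH_BUFFER(_START) command for
 * this generation, return the batch address that follows it, else 0.
 */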
static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (IS_I965G(dev))
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}

static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		/* Nothing between the start of the ring and HEAD; wrap
		 * around and scan back from the end of the ring instead.
		 */
		ring = (u32 *)(dev_priv->render_ring.virtual_start
				+ dev_priv->render_ring.size);
		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (!IS_I965G(dev)) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv,
			&dev_priv->render_ring.active_list, list) {

		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size &&
		    batchbuffer[0] != obj)
			batchbuffer[1] = obj;

		count++;
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev,
			dev_priv->render_ring.gem_object);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv,
				&dev_priv->render_ring.active_list, list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}

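/* Decode the Error Identity Register, dump the relevant per-generation
 * error state to the log, and ack each source by writing it back.
 */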
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);

	if (!eir)
		return;

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (IS_I9XX(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		u32 pipea_stats = I915_READ(PIPEASTAT);
		u32 pipeb_stats = I915_READ(PIPEBSTAT);

		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck;
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: whether the GPU should be considered hung
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

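/* Main IRQ handler.  PCH-split (Ironlake) chips are forwarded to
 * ironlake_irq_handler(); everything else is handled in the loop below,
 * which keeps going until IIR reads back zero, since clearing handled
 * bits can expose newly latched ones (see the MSI note at the bottom
 * of the loop).
 */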
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (IS_I965G(dev))
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			u32 seqno =
				render_ring->get_gem_seqno(dev, render_ring);
			render_ring->irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			if (!dev_priv->flip_pending_is_done)
				intel_finish_page_flip(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			if (!dev_priv->flip_pending_is_done)
				intel_finish_page_flip(dev, 1);
		}

		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

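/* Emit a breadcrumb store and MI_USER_INTERRUPT on the legacy ring; the
 * returned counter value is what i915_wait_irq() later compares against
 * READ_BREADCRUMB().
 */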
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	if (dev_priv->trace_irq_seqno == 0)
		render_ring->user_irq_get(dev, render_ring);

	dev_priv->trace_irq_seqno = seqno;
}

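/* Block (up to 3 seconds) until the breadcrumb written by i915_emit_irq()
 * reaches irq_nr, holding a user-irq reference for the duration.
 */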
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	render_ring->user_irq_get(dev, render_ring);
	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	render_ring->user_irq_put(dev, render_ring);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}


/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->render_ring.request_list.prev,
			struct drm_i915_gem_request, list);
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1;

	/* No reset support on this chip yet. */
	if (IS_GEN6(dev))
		return;

	if (!IS_I965G(dev)) {
		acthd = I915_READ(ACTHD);
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		acthd = I915_READ(ACTHD_I965);
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->render_ring.request_list) ||
		i915_seqno_passed(i915_get_gem_seqno(dev,
				&dev_priv->render_ring),
			i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
			i915_handle_error(dev, true);
			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

	/* Reset the timer in case the chip hangs without another request
	 * being added.
	 */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}

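/* Set up the three Ironlake interrupt domains (DE, GT, SDE): ack stale
 * bits in each IIR, program the cached masks, then enable in the IER.
 */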
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* The interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* These should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	/* The user interrupt is enabled in the IER but initially masked */
	dev_priv->gt_irq_mask_reg = ~render_mask;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);

	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, enable_mask);
	(void) I915_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	opregion_enable_asle(dev);

	return 0;
}

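/* Mirror of ironlake_irq_preinstall minus the SDE block: mask everything,
 * disable the IERs and ack any interrupts still latched in the IIRs.
 */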
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack any status bits that are still set on the way out */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
1506