xref: /titanic_50/usr/src/uts/intel/io/drm/i915_drv.c (revision 174bc6499d233e329ecd3d98a880a7b07df16bfa)
1 /* BEGIN CSTYLED */
2 
3 /*
4  * i915_drv.c -- Intel i915 driver -*- linux-c -*-
5  * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
6  */
7 
8 /*
9  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
10  * Copyright (c) 2009, Intel Corporation.
11  * All Rights Reserved.
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice (including the next
21  * paragraph) shall be included in all copies or substantial portions of the
22  * Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
27  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
28  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
29  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
30  * OTHER DEALINGS IN THE SOFTWARE.
31  *
32  * Authors:
33  *    Gareth Hughes <gareth@valinux.com>
34  *
35  */
36 
37 /*
38  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
39  * Use is subject to license terms.
40  */
41 
42 /*
43  * I915 DRM Driver for Solaris
44  *
45  * This driver provides hardware 3D acceleration support for Intel
46  * integrated video devices (e.g. i8xx/i915/i945 series chipsets) under the
47  * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
48  * means the kernel device driver in DRI.
49  *
50  * The i915 driver is a device-dependent driver only; it depends on a misc
51  * module named drm for generic DRM operations.
52  */
53 
54 #include "drmP.h"
55 #include "i915_drm.h"
56 #include "i915_drv.h"
57 #include "drm_pciids.h"
58 
59 /*
60  * copied from vgasubr.h
61  */
62 
63 struct vgaregmap {
64 	uint8_t			*addr;
65 	ddi_acc_handle_t	handle;
66 	boolean_t		mapped;
67 };
68 
69 enum pipe {
70 	PIPE_A = 0,
71 	PIPE_B,
72 };
73 
74 
75 /*
76  * cb_ops entrypoint
77  */
78 extern struct cb_ops drm_cb_ops;
79 
80 /*
81  * module entrypoint
82  */
83 static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
84 static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
85 static int i915_detach(dev_info_t *, ddi_detach_cmd_t);
86 
87 
88 /* drv_PCI_IDs comes from drm_pciids.h */
89 static drm_pci_id_list_t i915_pciidlist[] = {
90 	i915_PCI_IDS
91 };
92 
93 /*
94  * Local routines
95  */
96 static void i915_configure(drm_driver_t *);
97 static int i915_quiesce(dev_info_t *dip);
98 
99 /*
100  * DRM driver
101  */
102 static drm_driver_t	i915_driver = {0};
103 
104 
105 static struct dev_ops i915_dev_ops = {
106 	DEVO_REV,			/* devo_rev */
107 	0,				/* devo_refcnt */
108 	i915_info,			/* devo_getinfo */
109 	nulldev,			/* devo_identify */
110 	nulldev,			/* devo_probe */
111 	i915_attach,			/* devo_attach */
112 	i915_detach,			/* devo_detach */
113 	nodev,				/* devo_reset */
114 	&drm_cb_ops,			/* devo_cb_ops */
115 	NULL,				/* devo_bus_ops */
116 	NULL,				/* power */
117 	i915_quiesce,	/* devo_quiesce */
118 };
119 
120 static struct modldrv modldrv = {
121 	&mod_driverops,			/* drv_modops */
122 	"I915 DRM driver",	/* drv_linkinfo */
123 	&i915_dev_ops,			/* drv_dev_ops */
124 };
125 
126 static struct modlinkage modlinkage = {
127 	MODREV_1, (void *) &modldrv, NULL
128 };
129 
130 static ddi_device_acc_attr_t s3_attr = {
131 	DDI_DEVICE_ATTR_V0,
132 	DDI_NEVERSWAP_ACC,
133 	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
134 };
135 
136 /*
137  * softstate head
138  */
139 static void 	*i915_statep;
140 
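/*
 * _init() is called when the module is first loaded: it fills in the
 * drm_driver_t ops vector via i915_configure(), initializes the per-instance
 * soft state list and then installs the module.  _fini() undoes this in
 * reverse order, and _info() reports the module information.
 */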
141 int
142 _init(void)
143 {
144 	int error;
145 
146 	i915_configure(&i915_driver);
147 
148 	if ((error = ddi_soft_state_init(&i915_statep,
149 	    sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
150 		return (error);
151 
152 	if ((error = mod_install(&modlinkage)) != 0) {
153 		ddi_soft_state_fini(&i915_statep);
154 		return (error);
155 	}
156 
157 	return (error);
158 
159 }	/* _init() */
160 
161 int
162 _fini(void)
163 {
164 	int error;
165 
166 	if ((error = mod_remove(&modlinkage)) != 0)
167 		return (error);
168 
169 	(void) ddi_soft_state_fini(&i915_statep);
170 
171 	return (0);
172 
173 }	/* _fini() */
174 
175 int
176 _info(struct modinfo *modinfop)
177 {
178 	return (mod_info(&modlinkage, modinfop));
179 
180 }	/* _info() */
181 
182 /*
183  * off range: 0x3b0 ~ 0x3ff
184  */
185 
186 static void
187 vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
188 {
189 	ASSERT((off >= 0x3b0) && (off <= 0x3ff));
190 
191 	ddi_put8(regmap->handle, regmap->addr + off, val);
192 }
193 
194 /*
195  * off range: 0x3b0 ~ 0x3ff
196  */
197 static uint8_t
198 vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
199 {
200 
201 	ASSERT((off >= 0x3b0) && (off <= 0x3ff));
202 
203 	return (ddi_get8(regmap->handle, regmap->addr + off));
204 }
205 
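/*
 * Indexed VGA register access: write the register index to the index port,
 * then read or write the value through the matching data port.
 */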
206 static void
207 i915_write_indexed(struct vgaregmap *regmap,
208     uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
209 {
210 	vga_reg_put8(regmap, index_port, index);
211 	vga_reg_put8(regmap, data_port, val);
212 }
213 
214 static uint8_t
215 i915_read_indexed(struct vgaregmap *regmap,
216     uint16_t index_port, uint16_t data_port, uint8_t index)
217 {
218 	vga_reg_put8(regmap, index_port, index);
219 	return (vga_reg_get8(regmap, data_port));
220 }
221 
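/*
 * The VGA attribute controller multiplexes index and data writes through one
 * port (VGA_AR_INDEX); reading the status register (st01) resets its internal
 * index/data flip-flop so that the next write is treated as an index.  Bit
 * 0x20 of the index byte selects whether the palette is driven by the display
 * or left open for host access.
 */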
222 static void
223 i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
224     uint8_t reg, uint8_t val, uint8_t palette_enable)
225 {
226 	(void) vga_reg_get8(regmap, st01);
227 	vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
228 	vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
229 }
230 
231 static uint8_t
232 i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
233     uint8_t index, uint8_t palette_enable)
234 {
235 	(void) vga_reg_get8(regmap, st01);
236 	vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
237 	return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
238 }
239 
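/*
 * A display pipe is treated as enabled when the VCO enable bit of its DPLL
 * (DPLL_A or DPLL_B) is set; the palette save/restore routines below use
 * this to skip pipes that are powered down.
 */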
240 static int
241 i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
242 {
243 	struct s3_i915_private *s3_priv = dev->s3_private;
244 
245 	if (pipe == PIPE_A)
246 		return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
247 	else
248 		return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
249 }
250 
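/*
 * Each pipe has a 256-entry palette (PALETTE_A/PALETTE_B); the entries are
 * 32 bits wide and 4 bytes apart, hence the (i << 2) register offsets.
 */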
251 static void
252 i915_save_palette(struct drm_device *dev, enum pipe pipe)
253 {
254 	struct s3_i915_private *s3_priv = dev->s3_private;
255 	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
256 	uint32_t *array;
257 	int i;
258 
259 	if (!i915_pipe_enabled(dev, pipe))
260 		return;
261 
262 	if (pipe == PIPE_A)
263 		array = s3_priv->save_palette_a;
264 	else
265 		array = s3_priv->save_palette_b;
266 
267 	for (i = 0; i < 256; i++)
268 		array[i] = S3_READ(reg + (i << 2));
269 
270 }
271 
272 static void
273 i915_restore_palette(struct drm_device *dev, enum pipe pipe)
274 {
275 	struct s3_i915_private *s3_priv = dev->s3_private;
276 	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
277 	uint32_t *array;
278 	int i;
279 
280 	if (!i915_pipe_enabled(dev, pipe))
281 		return;
282 
283 	if (pipe == PIPE_A)
284 		array = s3_priv->save_palette_a;
285 	else
286 		array = s3_priv->save_palette_b;
287 
288 	for (i = 0; i < 256; i++)
289 		S3_WRITE(reg + (i << 2), array[i]);
290 }
291 
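/*
 * Save the legacy VGA state: DAC palette, the miscellaneous output register
 * (which selects the CGA or MDA register block), and the CRT controller,
 * attribute controller, graphics controller and sequencer registers.
 */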
292 static void
293 i915_save_vga(struct drm_device *dev)
294 {
295 	struct s3_i915_private *s3_priv = dev->s3_private;
296 	int i;
297 	uint16_t cr_index, cr_data, st01;
298 	struct vgaregmap regmap;
299 
300 	regmap.addr = (uint8_t *)s3_priv->saveAddr;
301 	regmap.handle = s3_priv->saveHandle;
302 
303 	/* VGA color palette registers */
304 	s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
305 	/* DACCRX automatically increments during read */
306 	vga_reg_put8(&regmap, VGA_DACRX, 0);
307 	/* Read 3 bytes of color data from each index */
308 	for (i = 0; i < 256 * 3; i++)
309 		s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);
310 
311 	/* MSR bits */
312 	s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
313 	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
314 		cr_index = VGA_CR_INDEX_CGA;
315 		cr_data = VGA_CR_DATA_CGA;
316 		st01 = VGA_ST01_CGA;
317 	} else {
318 		cr_index = VGA_CR_INDEX_MDA;
319 		cr_data = VGA_CR_DATA_MDA;
320 		st01 = VGA_ST01_MDA;
321 	}
322 
323 	/* CRT controller regs */
324 	i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
325 	    i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
326 	for (i = 0; i <= 0x24; i++)
327 		s3_priv->saveCR[i] =
328 		    i915_read_indexed(&regmap, cr_index, cr_data, i);
329 	/* Make sure we don't turn off CR group 0 writes */
330 	s3_priv->saveCR[0x11] &= ~0x80;
331 
332 	/* Attribute controller registers */
333 	(void) vga_reg_get8(&regmap, st01);
334 	s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
335 	for (i = 0; i <= 0x14; i++)
336 		s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
337 	(void) vga_reg_get8(&regmap, st01);
338 	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
339 	(void) vga_reg_get8(&regmap, st01);
340 
341 	/* Graphics controller registers */
342 	for (i = 0; i < 9; i++)
343 		s3_priv->saveGR[i] =
344 		    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);
345 
346 	s3_priv->saveGR[0x10] =
347 		i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
348 	s3_priv->saveGR[0x11] =
349 		i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
350 	s3_priv->saveGR[0x18] =
351 		i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
352 
353 	/* Sequencer registers */
354 	for (i = 0; i < 8; i++)
355 		s3_priv->saveSR[i] =
356 		    i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
357 }
358 
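/*
 * Write back the VGA state captured by i915_save_vga(): MSR first (so the
 * correct CGA/MDA register block is selected), then the sequencer, CRT
 * controller, graphics controller, attribute controller and DAC palette.
 */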
359 static void
360 i915_restore_vga(struct drm_device *dev)
361 {
362 	struct s3_i915_private *s3_priv = dev->s3_private;
363 	int i;
364 	uint16_t cr_index, cr_data, st01;
365 	struct vgaregmap regmap;
366 
367 	regmap.addr = (uint8_t *)s3_priv->saveAddr;
368 	regmap.handle = s3_priv->saveHandle;
369 
370 	/*
371 	 * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
372 	 * I/O address for the CRT Controller registers,
373 	 * the Feature Control Register (FCR), and Input Status Register
374 	 * 1 (ST01). Presently ignored (whole range is claimed), but
375 	 * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
376 	 * Note that it is typical in AGP chipsets to shadow this bit
377 	 * and properly steer I/O cycles to the proper bus for operation
378 	 * where a MDA exists on another bus such as ISA.
379 	 * 0 = Select 3Bxh I/O address (MDA emulation) (default).
380 	 * 1 = Select 3Dxh I/O address (CGA emulation).
381 	 */
382 	vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);
383 
384 	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
385 		cr_index = VGA_CR_INDEX_CGA;
386 		cr_data = VGA_CR_DATA_CGA;
387 		st01 = VGA_ST01_CGA;
388 	} else {
389 		cr_index = VGA_CR_INDEX_MDA;
390 		cr_data = VGA_CR_DATA_MDA;
391 		st01 = VGA_ST01_MDA;
392 	}
393 
394 	/* Sequencer registers, don't write SR07 */
395 	for (i = 0; i < 7; i++)
396 		i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
397 		    s3_priv->saveSR[i]);
398 	/* CRT controller regs */
399 	/* Enable CR group 0 writes */
400 	i915_write_indexed(&regmap, cr_index, cr_data,
401 	    0x11, s3_priv->saveCR[0x11]);
402 	for (i = 0; i <= 0x24; i++)
403 		i915_write_indexed(&regmap, cr_index,
404 		    cr_data, i, s3_priv->saveCR[i]);
405 
406 	/* Graphics controller regs */
407 	for (i = 0; i < 9; i++)
408 		i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
409 		    s3_priv->saveGR[i]);
410 
411 	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
412 	    s3_priv->saveGR[0x10]);
413 	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
414 	    s3_priv->saveGR[0x11]);
415 	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
416 	    s3_priv->saveGR[0x18]);
417 
418 	/* Attribute controller registers */
419 	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
420 	for (i = 0; i <= 0x14; i++)
421 		i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
422 	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
423 	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
424 	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
425 
426 	/* VGA color palette registers */
427 	vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
428 	/* DACWX automatically increments during write */
429 	vga_reg_put8(&regmap, VGA_DACWX, 0);
430 	/* Write 3 bytes of color data to each index */
431 	for (i = 0; i < 256 * 3; i++)
432 		vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
433 }
434 
435 /**
436  * i915_save_display - save display & mode info
437  * @dev: DRM device
438  *
439  * Save mode timings and display info.
440  */
441 void i915_save_display(struct drm_device *dev)
442 {
443 	struct s3_i915_private *s3_priv = dev->s3_private;
444 
445 	/* Display arbitration control */
446 	s3_priv->saveDSPARB = S3_READ(DSPARB);
447 
448 	/*
449 	 * Pipe & plane A info.
450 	 */
451 	s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
452 	s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
453 	s3_priv->saveFPA0 = S3_READ(FPA0);
454 	s3_priv->saveFPA1 = S3_READ(FPA1);
455 	s3_priv->saveDPLL_A = S3_READ(DPLL_A);
456 	if (IS_I965G(dev))
457 		s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
458 	s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
459 	s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
460 	s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
461 	s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
462 	s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
463 	s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
464 	s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);
465 
466 	s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
467 	s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
468 	s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
469 	s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
470 	s3_priv->saveDSPABASE = S3_READ(DSPABASE);
471 	if (IS_I965G(dev)) {
472 		s3_priv->saveDSPASURF = S3_READ(DSPASURF);
473 		s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
474 	}
475 	i915_save_palette(dev, PIPE_A);
476 	s3_priv->savePIPEASTAT = S3_READ(PIPEASTAT);
477 
478 	/*
479 	 * Pipe & plane B info
480 	 */
481 	s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
482 	s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
483 	s3_priv->saveFPB0 = S3_READ(FPB0);
484 	s3_priv->saveFPB1 = S3_READ(FPB1);
485 	s3_priv->saveDPLL_B = S3_READ(DPLL_B);
486 	if (IS_I965G(dev))
487 		s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
488 	s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
489 	s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
490 	s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
491 	s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
492 	s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
493 	s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
494 	s3_priv->saveBCLRPAT_B = S3_READ(BCLRPAT_B);
495 
496 	s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
497 	s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
498 	s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
499 	s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
500 	s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
501 	if (IS_I965GM(dev) || IS_GM45(dev)) {
502 		s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
503 		s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
504 	}
505 	i915_save_palette(dev, PIPE_B);
506 	s3_priv->savePIPEBSTAT = S3_READ(PIPEBSTAT);
507 
508 	/*
509 	 * CRT state
510 	 */
511 	s3_priv->saveADPA = S3_READ(ADPA);
512 
513 	/*
514 	 * LVDS state
515 	 */
516 	s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
517 	s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
518 	s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
519 	if (IS_I965G(dev))
520 		s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
521 	if (IS_MOBILE(dev) && !IS_I830(dev))
522 		s3_priv->saveLVDS = S3_READ(LVDS);
523 	if (!IS_I830(dev) && !IS_845G(dev))
524 		s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
525 	s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
526 	s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
527 	s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);
528 
529 	/* FIXME: save TV & SDVO state */
530 
531 	/* FBC state */
532 	s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
533 	s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
534 	s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
535 	s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);
536 
537 	/* VGA state */
538 	s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
539 	s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
540 	s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
541 	s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);
542 
543 	i915_save_vga(dev);
544 }
545 
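/**
 * i915_restore_display - restore display & mode info
 * @dev: DRM device
 *
 * Restore the mode timings and display info saved by i915_save_display().
 */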
546 void i915_restore_display(struct drm_device *dev)
547 {
548 	struct s3_i915_private *s3_priv = dev->s3_private;
549 
550 	S3_WRITE(DSPARB, s3_priv->saveDSPARB);
551 
552 	/*
553 	 * Pipe & plane A info
554 	 * Prime the clock
555 	 */
556 	if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
557 		S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
558 		    ~DPLL_VCO_ENABLE);
559 		drv_usecwait(150);
560 	}
561 	S3_WRITE(FPA0, s3_priv->saveFPA0);
562 	S3_WRITE(FPA1, s3_priv->saveFPA1);
563 	/* Actually enable it */
564 	S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
565 	drv_usecwait(150);
566 	if (IS_I965G(dev))
567 		S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
568 	drv_usecwait(150);
569 
570 	/* Restore mode */
571 	S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
572 	S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
573 	S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
574 	S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
575 	S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
576 	S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
577 	S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);
578 
579 	/* Restore plane info */
580 	S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
581 	S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
582 	S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
583 	S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
584 	S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
585 	if (IS_I965G(dev)) {
586 		S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
587 		S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
588 	}
589 	S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
590 	i915_restore_palette(dev, PIPE_A);
591 	/* Enable the plane */
592 	S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
593 	S3_WRITE(DSPABASE, S3_READ(DSPABASE));
594 
595 	/* Pipe & plane B info */
596 	if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
597 		S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
598 		    ~DPLL_VCO_ENABLE);
599 		drv_usecwait(150);
600 	}
601 	S3_WRITE(FPB0, s3_priv->saveFPB0);
602 	S3_WRITE(FPB1, s3_priv->saveFPB1);
603 	/* Actually enable it */
604 	S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
605 	drv_usecwait(150);
606 	if (IS_I965G(dev))
607 		S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
608 	drv_usecwait(150);
609 
610 	/* Restore mode */
611 	S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
612 	S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
613 	S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
614 	S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
615 	S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
616 	S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
617 	S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);
618 
619 	/* Restore plane info */
620 	S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
621 	S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
622 	S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
623 	S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
624 	S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
625 	if (IS_I965G(dev)) {
626 		S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
627 		S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
628 	}
629 	S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
630 	i915_restore_palette(dev, PIPE_B);
631 	/* Enable the plane */
632 	S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
633 	S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));
634 
635 	/* CRT state */
636 	S3_WRITE(ADPA, s3_priv->saveADPA);
637 
638 	/* LVDS state */
639 	if (IS_I965G(dev))
640 		S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
641 	if (IS_MOBILE(dev) && !IS_I830(dev))
642 		S3_WRITE(LVDS, s3_priv->saveLVDS);
643 	if (!IS_I830(dev) && !IS_845G(dev))
644 		S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);
645 
646 	S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
647 	S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
648 	S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
649 	S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
650 	S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
651 	S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);
652 
653 	/* FIXME: restore TV & SDVO state */
654 
655 	/* FBC info */
656 	S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
657 	S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
658 	S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
659 	S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);
660 
661 	/* VGA state */
662 	S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
663 	S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
664 	S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
665 	S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
666 	drv_usecwait(150);
667 
668 	i915_restore_vga(dev);
669 }
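
/*
 * DDI_RESUME handler.  Apart from the LBB register, PCI config space is
 * restored by the nexus driver, so this routine only restores the chip state
 * saved by i915_suspend(): render standby, the hardware status page, display
 * state, clock gating, cache mode, memory arbitration, the scratch registers
 * and the page table control register, and finally rebinds the AGP memory.
 */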
670 static int
671 i915_resume(struct drm_device *dev)
672 {
673 	ddi_acc_handle_t conf_hdl;
674 	struct s3_i915_private *s3_priv = dev->s3_private;
675 	int i;
676 
677 	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
678 		DRM_ERROR("i915_resume: pci_config_setup failed");
679 		return (DDI_FAILURE);
680 	}
681 	/*
682 	 * The nexus driver will restore the PCI config space and set the power
683 	 * state for its children, so we need not restore them explicitly here;
684 	 * see pci_pre_resume() for details.
685 	 */
686 	pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);
687 
688 	if (IS_I965G(dev) && IS_MOBILE(dev))
689 		S3_WRITE(MCHBAR_RENDER_STANDBY, s3_priv->saveRENDERSTANDBY);
690 	if (IS_I965GM(dev))
691 		(void) S3_READ(MCHBAR_RENDER_STANDBY);
692 
693 	S3_WRITE(HWS_PGA, s3_priv->saveHWS);
694 	if (IS_I965GM(dev))
695 		(void) S3_READ(HWS_PGA);
696 
697 	i915_restore_display(dev);
698 
699 	 /* Clock gating state */
700 	S3_WRITE (D_STATE, s3_priv->saveD_STATE);
701 	S3_WRITE (CG_2D_DIS, s3_priv->saveCG_2D_DIS);
702 
703 	/* Cache mode state */
704 	S3_WRITE (CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);
705 
706 	/* Memory arbitration state */
707 	S3_WRITE (MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);
708 
709 	for (i = 0; i < 16; i++) {
710 		S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
711 		S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i]);
712 	}
713 	for (i = 0; i < 3; i++)
714 		S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);
715 
716 	S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);
717 
718 	(void) pci_config_teardown(&conf_hdl);
719 
720 	drm_agp_rebind(dev);
721 
722 	return (DDI_SUCCESS);
723 }
724 
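/*
 * DDI_SUSPEND handler: save the chip registers that are not preserved across
 * S3 so that i915_resume() can restore them.  PCI config space itself is
 * saved and restored by the nexus driver, so only the LBB byte is kept here.
 */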
725 static int
726 i915_suspend(struct drm_device *dev)
727 {
728 	ddi_acc_handle_t conf_hdl;
729 	struct s3_i915_private *s3_priv = dev->s3_private;
730 	int i;
731 
732 	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
733 		DRM_ERROR("i915_suspend: pci_config_setup failed");
734 		return (DDI_FAILURE);
735 	}
736 
737 	/*
738 	 * Nexus driver will resume pci config space for its children.
739 	 * So pci config registers are not saved here.
740 	 */
741 	s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);
742 
743 	if (IS_I965G(dev) && IS_MOBILE(dev))
744 		s3_priv->saveRENDERSTANDBY = S3_READ(MCHBAR_RENDER_STANDBY);
745 
746 	/* Hardware status page */
747 	s3_priv->saveHWS = S3_READ(HWS_PGA);
748 
749 	i915_save_display(dev);
750 
751 	/* Interrupt state */
752 	s3_priv->saveIIR = S3_READ(IIR);
753 	s3_priv->saveIER = S3_READ(IER);
754 	s3_priv->saveIMR = S3_READ(IMR);
755 
756 	/* Clock gating state */
757 	s3_priv->saveD_STATE = S3_READ(D_STATE);
758 	s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);
759 
760 	/* Cache mode state */
761 	s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);
762 
763 	/* Memory Arbitration state */
764 	s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);
765 
766 	/* Scratch space */
767 	for (i = 0; i < 16; i++) {
768 		s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
769 		s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
770 	}
771 	for (i = 0; i < 3; i++)
772 		s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));
773 
774 	/*
775 	 * Save page table control register
776 	 */
777 	s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);
778 
779 	(void) pci_config_teardown(&conf_hdl);
780 
781 	return (DDI_SUCCESS);
782 }
783 
784 /*
785  * This function checks the length of the memory mapped I/O spaces to find
786  * the right BAR. There are two possibilities here:
787  * 1. The MMIO registers are in a 1M memory mapped I/O BAR; the bottom half
788  *    of the 1M space is the MMIO registers.
789  * 2. The MMIO registers occupy the whole of a 512K memory mapped I/O BAR.
790  */
791 static int
792 i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
793 {
794 	int	rnumber;
795 	int	nregs;
796 	off_t	size = 0;
797 
798 	if (ddi_dev_nregs(dip, &nregs)) {
799 		cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
800 		return (DDI_FAILURE);
801 	}
802 
803 	for (rnumber = 1; rnumber < nregs; rnumber++) {
804 		(void) ddi_dev_regsize(dip, rnumber, &size);
805 		if ((size == 0x80000) ||
806 		    (size == 0x100000) ||
807 		    (size == 0x400000))
808 			break;
809 	}
810 
811 	if (rnumber >= nregs) {
812 		cmn_err(CE_WARN,
813 		    "i915_map_regs: failed to find MMIO registers");
814 		return (DDI_FAILURE);
815 	}
816 
817 	if (ddi_regs_map_setup(dip, rnumber, save_addr,
818 	    0, 0x80000, &s3_attr, handlep)) {
819 		cmn_err(CE_WARN,
820 		    "i915_map_regs: failed to map bar %d", rnumber);
821 		return (DDI_FAILURE);
822 	}
823 
824 	return (DDI_SUCCESS);
825 }
826 static void
827 i915_unmap_regs(ddi_acc_handle_t *handlep)
828 {
829 	ddi_regs_map_free(handlep);
830 }
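
/*
 * attach(9E) entry point.  DDI_RESUME is handed off to i915_resume(); for
 * DDI_ATTACH the driver allocates the per-instance soft state and the S3
 * private structure, maps the MMIO BAR used for register save/restore,
 * registers with the common drm module (which creates the minor nodes),
 * matches the device against i915_pciidlist and finally calls drm_attach().
 */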
831 static int
832 i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
833 {
834 	drm_device_t		*statep;
835 	s3_i915_private_t	*s3_private;
836 	void		*handle;
837 	int			unit;
838 
839 	unit =  ddi_get_instance(dip);
840 	switch (cmd) {
841 	case DDI_ATTACH:
842 		break;
843 	case DDI_RESUME:
844 		statep = ddi_get_soft_state(i915_statep, unit);
845 		return (i915_resume(statep));
846 	default:
847 		DRM_ERROR("i915_attach: only attach and resume ops are supported");
848 		return (DDI_FAILURE);
849 
850 	}
851 
852 	if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
853 		cmn_err(CE_WARN,
854 		    "i915_attach: failed to alloc softstate");
855 		return (DDI_FAILURE);
856 	}
857 	statep = ddi_get_soft_state(i915_statep, unit);
858 	statep->dip = dip;
859 	statep->driver = &i915_driver;
860 
861 	statep->s3_private = drm_alloc(sizeof(s3_i915_private_t),
862 	    DRM_MEM_DRIVER);
863 
864 	if (statep->s3_private == NULL) {
865 		cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
866 		goto err_exit1;
867 	}
868 
869 	/*
870 	 * Map in the mmio register space for s3.
871 	 */
872 	s3_private = (s3_i915_private_t *)statep->s3_private;
873 
874 	if (i915_map_regs(dip, &s3_private->saveAddr,
875 	    &s3_private->saveHandle)) {
876 		cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
877 		goto err_exit2;
878 	}
879 
880 	/*
881 	 * Call drm_supp_register to create minor nodes for us
882 	 */
883 	handle = drm_supp_register(dip, statep);
884 	if (handle == NULL) {
885 		DRM_ERROR("i915_attach: drm_supp_register failed");
886 		goto err_exit3;
887 	}
888 	statep->drm_handle = handle;
889 
890 	/*
891 	 * After drm_supp_register, we can call drm_xxx routine
892 	 */
893 	statep->drm_supported = DRM_UNSUPPORT;
894 	if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
895 		DRM_ERROR("i915_attach: "
896 		    "DRM does not currently support this graphics card");
898 		goto err_exit4;
899 	}
900 	statep->drm_supported = DRM_SUPPORT;
901 
902 	/* call common attach code */
903 	if (drm_attach(statep) != DDI_SUCCESS) {
904 		DRM_ERROR("i915_attach: drm_attach failed");
905 		goto err_exit4;
906 	}
907 	return (DDI_SUCCESS);
908 err_exit4:
909 	(void) drm_supp_unregister(handle);
910 err_exit3:
911 	i915_unmap_regs(&s3_private->saveHandle);
912 err_exit2:
913 	drm_free(statep->s3_private, sizeof(s3_i915_private_t),
914 	    DRM_MEM_DRIVER);
915 err_exit1:
916 	(void) ddi_soft_state_free(i915_statep, unit);
917 
918 	return (DDI_FAILURE);
919 
920 }	/* i915_attach() */
921 
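/*
 * detach(9E) entry point.  DDI_SUSPEND is handed off to i915_suspend();
 * DDI_DETACH tears down what i915_attach() set up, in reverse order.
 */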
922 static int
923 i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
924 {
925 	drm_device_t		*statep;
926 	int		unit;
927 	s3_i915_private_t	*s3_private;
928 
929 	if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
930 		DRM_ERROR("i915_detach: "
931 		    "only detach and suspend ops are supported");
932 		return (DDI_FAILURE);
933 	}
934 
935 	unit =  ddi_get_instance(dip);
936 	statep = ddi_get_soft_state(i915_statep, unit);
937 	if (statep == NULL) {
938 		DRM_ERROR("i915_detach: cannot get soft state");
939 		return (DDI_FAILURE);
940 	}
941 
942 	if (cmd == DDI_SUSPEND)
943 		return (i915_suspend(statep));
944 
945 	s3_private = (s3_i915_private_t *)statep->s3_private;
946 	ddi_regs_map_free(&s3_private->saveHandle);
947 
948 	/*
949 	 * Free the struct for context saving in S3
950 	 */
951 	drm_free(statep->s3_private, sizeof(s3_i915_private_t),
952 	    DRM_MEM_DRIVER);
953 
954 	(void) drm_detach(statep);
955 	(void) drm_supp_unregister(statep->drm_handle);
956 	(void) ddi_soft_state_free(i915_statep, unit);
957 
958 	return (DDI_SUCCESS);
959 
960 }	/* i915_detach() */
961 
962 
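/*
 * getinfo(9E) entry point: translate a dev_t into the corresponding
 * dev_info node or instance number for the DDI framework.
 */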
963 /*ARGSUSED*/
964 static int
965 i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
966 {
967 	drm_device_t		*statep;
968 	int 	error = DDI_SUCCESS;
969 	int 	unit;
970 
971 	unit = drm_dev_to_instance((dev_t)arg);
972 	switch (infocmd) {
973 	case DDI_INFO_DEVT2DEVINFO:
974 		statep = ddi_get_soft_state(i915_statep, unit);
975 		if (statep == NULL || statep->dip == NULL) {
976 			error = DDI_FAILURE;
977 		} else {
978 			*result = (void *) statep->dip;
979 			error = DDI_SUCCESS;
980 		}
981 		break;
982 	case DDI_INFO_DEVT2INSTANCE:
983 		*result = (void *)(uintptr_t)unit;
984 		error = DDI_SUCCESS;
985 		break;
986 	default:
987 		error = DDI_FAILURE;
988 		break;
989 	}
990 	return (error);
991 
992 }	/* i915_info() */
993 
994 
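/*
 * Fill in the drm_driver_t ops vector consumed by the generic drm misc
 * module: driver entry points, vblank and interrupt handlers, GEM object
 * hooks, the i915 ioctl table, version information and the feature flags
 * (AGP is used and required, interrupts are used).
 */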
995 static void i915_configure(drm_driver_t *driver)
996 {
997 	driver->buf_priv_size	=	1;	/* No dev_priv */
998 	driver->load	=	i915_driver_load;
999 	driver->unload	=	i915_driver_unload;
1000 	driver->open	=	i915_driver_open;
1001 	driver->preclose	=	i915_driver_preclose;
1002 	driver->postclose	=	i915_driver_postclose;
1003 	driver->lastclose	=	i915_driver_lastclose;
1004 	driver->device_is_agp	=	i915_driver_device_is_agp;
1005 	driver->enable_vblank	= 	i915_enable_vblank;
1006 	driver->disable_vblank	= 	i915_disable_vblank;
1007 	driver->irq_preinstall	=	i915_driver_irq_preinstall;
1008 	driver->irq_postinstall	=	i915_driver_irq_postinstall;
1009 	driver->irq_uninstall	=	i915_driver_irq_uninstall;
1010 	driver->irq_handler 	=	i915_driver_irq_handler;
1011 
1012 	driver->gem_init_object = 	i915_gem_init_object;
1013 	driver->gem_free_object = 	i915_gem_free_object;
1014 
1015 	driver->driver_ioctls	=	i915_ioctls;
1016 	driver->max_driver_ioctl	=	i915_max_ioctl;
1017 
1018 	driver->driver_name	=	DRIVER_NAME;
1019 	driver->driver_desc	=	DRIVER_DESC;
1020 	driver->driver_date	=	DRIVER_DATE;
1021 	driver->driver_major	=	DRIVER_MAJOR;
1022 	driver->driver_minor	=	DRIVER_MINOR;
1023 	driver->driver_patchlevel	=	DRIVER_PATCHLEVEL;
1024 
1025 	driver->use_agp	=	1;
1026 	driver->require_agp	=	1;
1027 	driver->use_irq	=	1;
1028 }
1029 
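/*
 * quiesce(9E) entry point, used for fast reboot: disable the device's
 * interrupts so it stops generating them.  This routine must not block.
 */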
1030 static int i915_quiesce(dev_info_t *dip)
1031 {
1032 	drm_device_t		*statep;
1033 	int		unit;
1034 
1035 	unit =  ddi_get_instance(dip);
1036 	statep = ddi_get_soft_state(i915_statep, unit);
1037 	if (statep == NULL) {
1038 		return (DDI_FAILURE);
1039 	}
1040 	i915_driver_irq_uninstall(statep);
1041 
1042 	return (DDI_SUCCESS);
1043 }
1044