/* BEGIN CSTYLED */

/*
 * i915_drv.c -- Intel i915 driver -*- linux-c -*-
 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
 */

/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * I915 DRM Driver for Solaris
 *
 * This driver provides hardware 3D acceleration support for Intel
 * integrated video devices (e.g. i8xx/i915/i945 series chipsets) under the
 * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager)
 * here means the kernel device driver in DRI.
 *
 * The i915 driver is a device-dependent driver only; it depends on a misc
 * module named drm for generic DRM operations.
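 *
 * Besides the driver entry points, this file implements the register
 * save/restore (pipe, plane, palette and VGA state) used for S3
 * suspend/resume of the display hardware.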
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_pciids.h"

/*
 * copied from vgasubr.h
 */

struct vgaregmap {
	uint8_t			*addr;
	ddi_acc_handle_t	handle;
	boolean_t		mapped;
};

enum pipe {
	PIPE_A = 0,
	PIPE_B,
};


/*
 * cb_ops entrypoint
 */
extern struct cb_ops drm_cb_ops;

/*
 * module entrypoint
 */
static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
static int i915_detach(dev_info_t *, ddi_detach_cmd_t);


/* drv_PCI_IDs comes from drm_pciids.h */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};

/*
 * Local routines
 */
static void i915_configure(drm_driver_t *);
static int i915_quiesce(dev_info_t *dip);

/*
 * DRM driver
 */
static drm_driver_t	i915_driver = {0};


static struct dev_ops i915_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	i915_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i915_attach,		/* devo_attach */
	i915_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&drm_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* power */
	i915_quiesce,		/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	"I915 DRM driver",	/* drv_linkinfo */
	&i915_dev_ops,		/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &modldrv, NULL
};

static ddi_device_acc_attr_t s3_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};

/*
 * softstate head
 */
static void	*i915_statep;

int
_init(void)
{
	int error;

	i915_configure(&i915_driver);

	if ((error = ddi_soft_state_init(&i915_statep,
	    sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
		return (error);

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&i915_statep);
		return (error);
	}

	return (error);

} /* _init() */

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	(void) ddi_soft_state_fini(&i915_statep);

	return (0);

} /* _fini() */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));

} /* _info() */

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static void
vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	ddi_put8(regmap->handle, regmap->addr + off, val);
}

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static uint8_t
vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	return (ddi_get8(regmap->handle, regmap->addr + off));
}

static void
i915_write_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
{
	vga_reg_put8(regmap, index_port, index);
	vga_reg_put8(regmap, data_port, val);
}

static uint8_t
i915_read_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index)
{
	vga_reg_put8(regmap, index_port, index);
	return (vga_reg_get8(regmap, data_port));
}

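/*
 * The VGA attribute controller shares one port (3C0h) for both index and
 * data writes; reading Input Status Register 1 (ST01) resets its internal
 * flip-flop so that the next write to 3C0h is treated as an index. That is
 * why the helpers below read ST01 before touching VGA_AR_INDEX.
 */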
static void
i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t reg, uint8_t val, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
	vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
}

static uint8_t
i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t index, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
	return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
}

static int
i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;

	if (pipe == PIPE_A)
		return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
	else
		return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
}

static void
i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		array[i] = S3_READ(reg + (i << 2));

}

static void
i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		S3_WRITE(reg + (i << 2), array[i]);
}

static void
i915_save_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/* VGA color palette registers */
	s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
	/* DACCRX automatically increments during read */
	vga_reg_put8(&regmap, VGA_DACRX, 0);
	/* Read 3 bytes of color data from each index */
	for (i = 0; i < 256 * 3; i++)
		s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);

	/* MSR bits */
	s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* CRT controller regs */
	i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
	    i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
	for (i = 0; i <= 0x24; i++)
		s3_priv->saveCR[i] =
		    i915_read_indexed(&regmap, cr_index, cr_data, i);
	/* Make sure we don't turn off CR group 0 writes */
	s3_priv->saveCR[0x11] &= ~0x80;

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01);
	s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
	for (i = 0; i <= 0x14; i++)
		s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
	(void) vga_reg_get8(&regmap, st01);
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
	(void) vga_reg_get8(&regmap, st01);

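	/*
	 * In addition to the nine standard VGA graphics controller
	 * registers saved below, indices 0x10, 0x11 and 0x18 are saved as
	 * well; these appear to be Intel-specific extensions.
	 */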
	/* Graphics controller registers */
	for (i = 0; i < 9; i++)
		s3_priv->saveGR[i] =
		    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);

	s3_priv->saveGR[0x10] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
	s3_priv->saveGR[0x11] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
	s3_priv->saveGR[0x18] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);

	/* Sequencer registers */
	for (i = 0; i < 8; i++)
		s3_priv->saveSR[i] =
		    i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
}

static void
i915_restore_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/*
	 * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
	 * I/O address for the CRT Controller registers,
	 * the Feature Control Register (FCR), and Input Status Register
	 * 1 (ST01). Presently ignored (whole range is claimed), but
	 * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
	 * Note that it is typical in AGP chipsets to shadow this bit
	 * and properly steer I/O cycles to the proper bus for operation
	 * where an MDA exists on another bus such as ISA.
	 * 0 = Select 3Bxh I/O address (MDA emulation) (default).
	 * 1 = Select 3Dxh I/O address (CGA emulation).
	 */
	vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);

	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* Sequencer registers, don't write SR07 */
	for (i = 0; i < 7; i++)
		i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
		    s3_priv->saveSR[i]);
	/* CRT controller regs */
	/* Enable CR group 0 writes */
	i915_write_indexed(&regmap, cr_index, cr_data,
	    0x11, s3_priv->saveCR[0x11]);
	for (i = 0; i <= 0x24; i++)
		i915_write_indexed(&regmap, cr_index,
		    cr_data, i, s3_priv->saveCR[i]);

	/* Graphics controller regs */
	for (i = 0; i < 9; i++)
		i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
		    s3_priv->saveGR[i]);

	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
	    s3_priv->saveGR[0x10]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
	    s3_priv->saveGR[0x11]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
	    s3_priv->saveGR[0x18]);

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
	for (i = 0; i <= 0x14; i++)
		i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */

	/* VGA color palette registers */
	vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
	/* DACWX automatically increments during write */
	vga_reg_put8(&regmap, VGA_DACWX, 0);
	/* Write 3 bytes of color data to each index */
	for (i = 0; i < 256 * 3; i++)
		vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
}

static int
i915_resume(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

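	/*
	 * The S3_READ()/S3_WRITE() macros used throughout the
	 * suspend/resume code are defined outside this file (presumably in
	 * i915_drv.h) and are assumed to access the MMIO register space
	 * mapped by i915_map_regs() at attach time.
	 */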
	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR(("i915_resume: pci_config_setup fail"));
		return (DDI_FAILURE);
	}
	/*
	 * The nexus driver will resume pci config space and set the power
	 * state for its children, so we needn't resume them explicitly
	 * here; see pci_pre_resume for details.
	 */
	pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);

	S3_WRITE(DSPARB, s3_priv->saveDSPARB);

	/*
	 * Pipe & plane A info
	 * Prime the clock
	 */
	if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPA0, s3_priv->saveFPA0);
	S3_WRITE(FPA1, s3_priv->saveFPA1);
	/* Actually enable it */
	S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
	S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
	S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
	S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
	S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
	S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
	S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);

	/* Restore plane info */
	S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
	S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
	S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
	S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
	S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
		S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
	}
	S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
	i915_restore_palette(dev, PIPE_A);
	/* Enable the plane */
	S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
	S3_WRITE(DSPABASE, S3_READ(DSPABASE));

	/* Pipe & plane B info */
	if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPB0, s3_priv->saveFPB0);
	S3_WRITE(FPB1, s3_priv->saveFPB1);
	/* Actually enable it */
	S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
	S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
	S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
	S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
	S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
	S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
	S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);

	/* Restore plane info */
	S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
	S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
	S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
	S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
	S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
		S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
	}
	S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
	i915_restore_palette(dev, PIPE_B);
	/* Enable the plane */
	S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
	S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));

	/* CRT state */
	S3_WRITE(ADPA, s3_priv->saveADPA);

	/* LVDS state */
	if (IS_I965G(dev))
		S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		S3_WRITE(LVDS, s3_priv->saveLVDS);
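	/*
	 * PFIT_CONTROL is only restored on chips other than the 830 and
	 * 845; the same check guards the save in i915_suspend().
	 */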
	if (!IS_I830(dev) && !IS_845G(dev))
		S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);

	S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
	S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
	S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
	S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
	S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
	S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);

	/* FIXME: restore TV & SDVO state */

	/* FBC info */
	S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
	S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
	S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
	S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);

	/* VGA state */
	S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
	S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
	S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
	S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
	drv_usecwait(150);

	/* Clock gating state */
	S3_WRITE(D_STATE, s3_priv->saveD_STATE);
	S3_WRITE(CG_2D_DIS, s3_priv->saveCG_2D_DIS);

	/* Cache mode state */
	S3_WRITE(CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	S3_WRITE(MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
		S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i+7]);
	}
	for (i = 0; i < 3; i++)
		S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);

	i915_restore_vga(dev);

	S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);

	(void) pci_config_teardown(&conf_hdl);

	return (DDI_SUCCESS);
}

static int
i915_suspend(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR(("i915_suspend: pci_config_setup fail"));
		return (DDI_FAILURE);
	}

	/*
	 * The nexus driver will resume pci config space for its children,
	 * so pci config registers are not saved here.
	 */
	s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);

	/* Display arbitration control */
	s3_priv->saveDSPARB = S3_READ(DSPARB);

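	/*
	 * The register groups saved below mirror the groups restored in
	 * i915_resume() above.
	 */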
	/*
	 * Pipe & plane A info.
	 */
	s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
	s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
	s3_priv->saveFPA0 = S3_READ(FPA0);
	s3_priv->saveFPA1 = S3_READ(FPA1);
	s3_priv->saveDPLL_A = S3_READ(DPLL_A);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
	s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
	s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
	s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
	s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
	s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
	s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
	s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);

	s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
	s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
	s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
	s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
	s3_priv->saveDSPABASE = S3_READ(DSPABASE);
	if (IS_I965G(dev)) {
		s3_priv->saveDSPASURF = S3_READ(DSPASURF);
		s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
	}
	i915_save_palette(dev, PIPE_A);
	s3_priv->savePIPEASTAT = S3_READ(PIPEASTAT);

	/*
	 * Pipe & plane B info
	 */
	s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
	s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
	s3_priv->saveFPB0 = S3_READ(FPB0);
	s3_priv->saveFPB1 = S3_READ(FPB1);
	s3_priv->saveDPLL_B = S3_READ(DPLL_B);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
	s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
	s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
	s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
	s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
	s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
	s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
	s3_priv->saveBCLRPAT_B = S3_READ(BCLRPAT_B);

	s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
	s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
	s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
	s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
	s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
	if (IS_I965GM(dev) || IS_GM45(dev)) {
		s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
		s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
	}
	i915_save_palette(dev, PIPE_B);
	s3_priv->savePIPEBSTAT = S3_READ(PIPEBSTAT);

	/*
	 * CRT state
	 */
	s3_priv->saveADPA = S3_READ(ADPA);

	/*
	 * LVDS state
	 */
	s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
	s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
	s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
	if (IS_I965G(dev))
		s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		s3_priv->saveLVDS = S3_READ(LVDS);
	if (!IS_I830(dev) && !IS_845G(dev))
		s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
	s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
	s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
	s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);

	/* FIXME: save TV & SDVO state */

	/* FBC state */
	s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
	s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
	s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
	s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);

	/* Interrupt state */
	s3_priv->saveIIR = S3_READ(IIR);
	s3_priv->saveIER = S3_READ(IER);
	s3_priv->saveIMR = S3_READ(IMR);

	/* VGA state */
	s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
	s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
	s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
	s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);

	/* Clock gating state */
	s3_priv->saveD_STATE = S3_READ(D_STATE);
	s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);

	/* Cache mode state */
	s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);

	/* Memory arbitration state */
	s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
		s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));

	i915_save_vga(dev);

	/*
	 * Save page table control register
	 */
	s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);

	(void) pci_config_teardown(&conf_hdl);

	return (DDI_SUCCESS);
}

/*
 * This function checks the length of the memory-mapped IO space to find
 * the right BAR. There are two possibilities:
 * 1. The MMIO registers are in a memory-mapped IO BAR of 1M in size; the
 *    bottom half of the 1M space holds the MMIO registers.
 * 2. The MMIO registers are in a memory-mapped IO BAR of 512K in size; the
 *    whole 512K space holds the MMIO registers.
 */
static int
i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
{
	int rnumber;
	int nregs;
	off_t size = 0;

	if (ddi_dev_nregs(dip, &nregs)) {
		cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
		return (DDI_FAILURE);
	}

	for (rnumber = 1; rnumber < nregs; rnumber++) {
		(void) ddi_dev_regsize(dip, rnumber, &size);
		if ((size == 0x80000) ||
		    (size == 0x100000) ||
		    (size == 0x400000))
			break;
	}

	if (rnumber >= nregs) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to find MMIO registers");
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, rnumber, save_addr,
	    0, 0x80000, &s3_attr, handlep)) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to map bar %d", rnumber);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
i915_unmap_regs(ddi_acc_handle_t *handlep)
{
	ddi_regs_map_free(handlep);
}

static int
i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	drm_device_t *statep;
	s3_i915_private_t *s3_private;
	void *handle;
	int unit;

	unit = ddi_get_instance(dip);
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		statep = ddi_get_soft_state(i915_statep, unit);
		return (i915_resume(statep));
	default:
		DRM_ERROR("i915_attach: "
		    "only attach and resume ops are supported");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "i915_attach: failed to alloc softstate");
		return (DDI_FAILURE);
	}
	statep = ddi_get_soft_state(i915_statep, unit);
	statep->dip = dip;
	statep->driver = &i915_driver;

	statep->s3_private = drm_alloc(sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	if (statep->s3_private == NULL) {
		cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
		goto err_exit1;
	}

	/*
	 * Map in the mmio register space for s3.
	 */
	s3_private = (s3_i915_private_t *)statep->s3_private;

	if (i915_map_regs(dip, &s3_private->saveAddr,
	    &s3_private->saveHandle)) {
		cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
		goto err_exit2;
	}

	/*
	 * Call drm_supp_register to create minor nodes for us
	 */
	handle = drm_supp_register(dip, statep);
	if (handle == NULL) {
		DRM_ERROR("i915_attach: drm_supp_register failed");
		goto err_exit3;
	}
	statep->drm_handle = handle;

	/*
	 * After drm_supp_register, we can call drm_xxx routines
	 */
	statep->drm_supported = DRM_UNSUPPORT;
	if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: "
		    "DRM currently doesn't support this graphics card");
		goto err_exit4;
	}
	statep->drm_supported = DRM_SUPPORT;

	/* call common attach code */
	if (drm_attach(statep) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: drm_attach failed");
		goto err_exit4;
	}
	return (DDI_SUCCESS);
err_exit4:
	(void) drm_supp_unregister(handle);
err_exit3:
	i915_unmap_regs(&s3_private->saveHandle);
err_exit2:
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);
err_exit1:
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_FAILURE);

} /* i915_attach() */

static int
i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	drm_device_t *statep;
	int unit;
	s3_i915_private_t *s3_private;

	if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
		DRM_ERROR("i915_detach: "
		    "only detach and suspend ops are supported");
		return (DDI_FAILURE);
	}

	unit = ddi_get_instance(dip);
	statep = ddi_get_soft_state(i915_statep, unit);
	if (statep == NULL) {
		DRM_ERROR("i915_detach: can not get soft state");
		return (DDI_FAILURE);
	}

	if (cmd == DDI_SUSPEND)
		return (i915_suspend(statep));

	s3_private = (s3_i915_private_t *)statep->s3_private;
	ddi_regs_map_free(&s3_private->saveHandle);

	/*
	 * Free the struct for context saving in S3
	 */
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	(void) drm_detach(statep);
	(void) drm_supp_unregister(statep->drm_handle);
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_SUCCESS);

} /* i915_detach() */


/*ARGSUSED*/
static int
i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	drm_device_t *statep;
	int error = DDI_SUCCESS;
	int unit;

	unit = drm_dev_to_instance((dev_t)arg);
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		statep = ddi_get_soft_state(i915_statep, unit);
		if (statep == NULL || statep->dip == NULL) {
			error = DDI_FAILURE;
		} else {
			*result = (void *) statep->dip;
			error = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)unit;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
		break;
	}
	return (error);

} /* i915_info() */


static void
i915_configure(drm_driver_t *driver)
{
	driver->buf_priv_size = 1;	/* No dev_priv */
	driver->load = i915_driver_load;
	driver->unload = i915_driver_unload;
	driver->preclose = i915_driver_preclose;
	driver->lastclose = i915_driver_lastclose;
	driver->device_is_agp = i915_driver_device_is_agp;
	driver->get_vblank_counter = i915_get_vblank_counter;
	driver->enable_vblank = i915_enable_vblank;
	driver->disable_vblank = i915_disable_vblank;
	driver->irq_preinstall = i915_driver_irq_preinstall;
	driver->irq_postinstall = i915_driver_irq_postinstall;
	driver->irq_uninstall = i915_driver_irq_uninstall;
	driver->irq_handler = i915_driver_irq_handler;

	driver->driver_ioctls = i915_ioctls;
	driver->max_driver_ioctl = i915_max_ioctl;

	driver->driver_name = DRIVER_NAME;
	driver->driver_desc = DRIVER_DESC;
	driver->driver_date = DRIVER_DATE;
	driver->driver_major = DRIVER_MAJOR;
	driver->driver_minor = DRIVER_MINOR;
	driver->driver_patchlevel = DRIVER_PATCHLEVEL;

	driver->use_agp = 1;
	driver->require_agp = 1;
	driver->use_irq = 1;
}

static int
i915_quiesce(dev_info_t *dip)
{
	drm_device_t *statep;
	int unit;

	unit = ddi_get_instance(dip);
	statep = ddi_get_soft_state(i915_statep, unit);
	if (statep == NULL) {
		return (DDI_FAILURE);
	}
	i915_driver_irq_uninstall(statep);

	return (DDI_SUCCESS);
}