/* BEGIN CSTYLED */

/*
 * i915_drv.c -- Intel i915 driver -*- linux-c -*-
 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
 */

/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * I915 DRM Driver for Solaris
 *
 * This driver provides hardware 3D acceleration support for Intel
 * integrated video devices (e.g. i8xx/i915/i945 series chipsets) under the
 * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager)
 * here means the kernel device driver in DRI.
 *
 * The I915 driver is a device-dependent driver only; it depends on a misc
 * module named drm for generic DRM operations.
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_pciids.h"

#define	i915_max_ioctl	0x20	/* changed from 15 */

/*
 * copied from vgasubr.h
 */

struct vgaregmap {
	uint8_t			*addr;
	ddi_acc_handle_t	handle;
	boolean_t		mapped;
};

enum pipe {
	PIPE_A = 0,
	PIPE_B,
};


/*
 * cb_ops entrypoint
 */
extern struct cb_ops drm_cb_ops;

/*
 * module entrypoint
 */
static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
static int i915_detach(dev_info_t *, ddi_detach_cmd_t);


/* drv_PCI_IDs comes from drm_pciids.h */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};

drm_ioctl_desc_t i915_ioctls[i915_max_ioctl];

extern void i915_init_ioctl_arrays(void);

/*
 * Local routines
 */
static void i915_configure(drm_driver_t *);

/*
 * DRM driver
 */
static drm_driver_t	i915_driver = {0};


static struct dev_ops i915_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	i915_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	i915_attach,			/* devo_attach */
	i915_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&drm_cb_ops,			/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	NULL,				/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* drv_modops */
	"I915 DRM driver",		/* drv_linkinfo */
	&i915_dev_ops,			/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &modldrv, NULL
};

static ddi_device_acc_attr_t s3_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};

/*
 * softstate head
 */
static void	*i915_statep;

int
_init(void)
{
	int error;

	i915_configure(&i915_driver);

	if ((error = ddi_soft_state_init(&i915_statep,
	    sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
		return (error);

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&i915_statep);
		return (error);
	}

	return (error);

} /* _init() */

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	(void) ddi_soft_state_fini(&i915_statep);

	return (0);

} /* _fini() */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));

} /* _info() */

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static void
vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	ddi_put8(regmap->handle, regmap->addr + off, val);
}

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static uint8_t
vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	return (ddi_get8(regmap->handle, regmap->addr + off));
}

static void
i915_write_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
{
	vga_reg_put8(regmap, index_port, index);
	vga_reg_put8(regmap, data_port, val);
}

static uint8_t
i915_read_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index)
{
	vga_reg_put8(regmap, index_port, index);
	return (vga_reg_get8(regmap, data_port));
}
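
/*
 * The legacy VGA register file is reached through index/data port pairs
 * in the 0x3b0 ~ 0x3ff range (see the ASSERTs above): writing a register
 * index to the index port selects which internal register the following
 * data-port access hits.  A sketch of using the helpers above, assuming
 * regmap has been set up as in i915_save_vga() below (illustrative only):
 *
 *	uint8_t sr01 = i915_read_indexed(&regmap, VGA_SR_INDEX,
 *	    VGA_SR_DATA, 0x01);
 *
 * The attribute controller handled next is the exception: it shares one
 * port (VGA_AR_INDEX) for index and data writes, and an internal
 * flip-flop decides which of the two the next write is.  Reading ST01
 * resets that flip-flop to the "index" state, which is why
 * i915_write_ar() and i915_read_ar() read st01 before touching
 * VGA_AR_INDEX.
 */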

static void
i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t reg, uint8_t val, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
	vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
}

static uint8_t
i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t index, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
	return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
}

static int
i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;

	if (pipe == PIPE_A)
		return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
	else
		return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
}

static void
i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		array[i] = S3_READ(reg + (i << 2));
}

static void
i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		S3_WRITE(reg + (i << 2), array[i]);
}
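
/*
 * Note on the palette helpers above: each pipe has a 256-entry hardware
 * palette starting at PALETTE_A/PALETTE_B, one 32-bit entry every 4 bytes
 * (hence the i << 2 addressing).  Save and restore are skipped when the
 * pipe's DPLL is not running, which is what i915_pipe_enabled() checks
 * via DPLL_VCO_ENABLE.
 */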

static void
i915_save_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/* VGA color palette registers */
	s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
	/* DACCRX automatically increments during read */
	vga_reg_put8(&regmap, VGA_DACRX, 0);
	/* Read 3 bytes of color data from each index */
	for (i = 0; i < 256 * 3; i++)
		s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);

	/* MSR bits */
	s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* CRT controller regs */
	i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
	    i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
	for (i = 0; i <= 0x24; i++)
		s3_priv->saveCR[i] =
		    i915_read_indexed(&regmap, cr_index, cr_data, i);
	/* Make sure we don't turn off CR group 0 writes */
	s3_priv->saveCR[0x11] &= ~0x80;

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01);
	s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
	for (i = 0; i <= 0x14; i++)
		s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
	(void) vga_reg_get8(&regmap, st01);
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
	(void) vga_reg_get8(&regmap, st01);

	/* Graphics controller registers */
	for (i = 0; i < 9; i++)
		s3_priv->saveGR[i] =
		    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);

	s3_priv->saveGR[0x10] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
	s3_priv->saveGR[0x11] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
	s3_priv->saveGR[0x18] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);

	/* Sequencer registers */
	for (i = 0; i < 8; i++)
		s3_priv->saveSR[i] =
		    i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
}

static void
i915_restore_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/*
	 * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
	 * I/O address for the CRT Controller registers,
	 * the Feature Control Register (FCR), and Input Status Register
	 * 1 (ST01). Presently ignored (whole range is claimed), but
	 * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
	 * Note that it is typical in AGP chipsets to shadow this bit
	 * and properly steer I/O cycles to the proper bus for operation
	 * where a MDA exists on another bus such as ISA.
	 * 0 = Select 3Bxh I/O address (MDA emulation) (default).
	 * 1 = Select 3Dxh I/O address (CGA emulation).
	 */
	vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);

	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* Sequencer registers, don't write SR07 */
	for (i = 0; i < 7; i++)
		i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
		    s3_priv->saveSR[i]);

	/* CRT controller regs */
	/* Enable CR group 0 writes */
	i915_write_indexed(&regmap, cr_index, cr_data,
	    0x11, s3_priv->saveCR[0x11]);
	for (i = 0; i <= 0x24; i++)
		i915_write_indexed(&regmap, cr_index,
		    cr_data, i, s3_priv->saveCR[i]);

	/* Graphics controller regs */
	for (i = 0; i < 9; i++)
		i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
		    s3_priv->saveGR[i]);

	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
	    s3_priv->saveGR[0x10]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
	    s3_priv->saveGR[0x11]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
	    s3_priv->saveGR[0x18]);

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01);	/* switch back to index mode */
	for (i = 0; i <= 0x14; i++)
		i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
	(void) vga_reg_get8(&regmap, st01);	/* switch back to index mode */
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
	(void) vga_reg_get8(&regmap, st01);	/* switch back to index mode */

	/* VGA color palette registers */
	vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
	/* DACWX automatically increments during write */
	vga_reg_put8(&regmap, VGA_DACWX, 0);
	/* Write 3 bytes of color data to each index */
	for (i = 0; i < 256 * 3; i++)
		vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
}
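
/*
 * S3 suspend/resume of the display hardware.  i915_suspend() snapshots
 * register state into s3_i915_private: DPLLs, pipe timings, plane setup,
 * palettes, LVDS/panel power, FBC, clock gating, scratch registers, the
 * legacy VGA state above and the page table control register.
 * i915_resume() writes it back in dependency order: each DPLL is
 * programmed first and given time to settle (drv_usecwait(150)) before
 * the pipe timings, plane registers and palette are restored and the
 * plane is re-enabled.
 */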
static int
i915_resume(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR("i915_resume: pci_config_setup fail");
		return (DDI_FAILURE);
	}
	/*
	 * Nexus driver will resume pci config space and set the power state
	 * for its children. So we needn't resume them explicitly here.
	 * see pci_pre_resume for detail.
	 */
	pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);

	S3_WRITE(DSPARB, s3_priv->saveDSPARB);

	/*
	 * Pipe & plane A info
	 * Prime the clock
	 */
	if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPA0, s3_priv->saveFPA0);
	S3_WRITE(FPA1, s3_priv->saveFPA1);
	/* Actually enable it */
	S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
	S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
	S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
	S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
	S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
	S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
	S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);

	/* Restore plane info */
	S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
	S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
	S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
	S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
	S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
		S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
	}
	S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
	i915_restore_palette(dev, PIPE_A);
	/* Enable the plane */
	S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
	S3_WRITE(DSPABASE, S3_READ(DSPABASE));

	/* Pipe & plane B info */
	if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPB0, s3_priv->saveFPB0);
	S3_WRITE(FPB1, s3_priv->saveFPB1);
	/* Actually enable it */
	S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
	S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
	S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
	S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
	S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
	S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
	S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);

	/* Restore plane info */
	S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
	S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
	S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
	S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
	S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
		S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
	}
	S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
	i915_restore_palette(dev, PIPE_B);
	/* Enable the plane */
	S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
	S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));

	/* CRT state */
	S3_WRITE(ADPA, s3_priv->saveADPA);

	/* LVDS state */
	if (IS_I965G(dev))
		S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		S3_WRITE(LVDS, s3_priv->saveLVDS);
	if (!IS_I830(dev) && !IS_845G(dev))
		S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);

	S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
	S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
	S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
	S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
	S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
	S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);

	/* FIXME: restore TV & SDVO state */

	/* FBC info */
	S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
	S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
	S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
	S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);

	/* VGA state */
	S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
	S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
	S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
	S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
	drv_usecwait(150);

	/* Clock gating state */
	S3_WRITE(D_STATE, s3_priv->saveD_STATE);
	S3_WRITE(CG_2D_DIS, s3_priv->saveCG_2D_DIS);

	/* Cache mode state */
	S3_WRITE(CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	S3_WRITE(MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
		S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i+7]);
	}
	for (i = 0; i < 3; i++)
		S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);

	i915_restore_vga(dev);

	S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);

	(void) pci_config_teardown(&conf_hdl);

	return (DDI_SUCCESS);
}

static int
i915_suspend(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR("i915_suspend: pci_config_setup fail");
		return (DDI_FAILURE);
	}

	/*
	 * Nexus driver will resume pci config space for its children.
	 * So pci config registers are not saved here.
	 */
	s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);

	/* Display arbitration control */
	s3_priv->saveDSPARB = S3_READ(DSPARB);

	/*
	 * Pipe & plane A info.
	 */
	s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
	s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
	s3_priv->saveFPA0 = S3_READ(FPA0);
	s3_priv->saveFPA1 = S3_READ(FPA1);
	s3_priv->saveDPLL_A = S3_READ(DPLL_A);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
	s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
	s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
	s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
	s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
	s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
	s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
	s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);

	s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
	s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
	s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
	s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
	s3_priv->saveDSPABASE = S3_READ(DSPABASE);
	if (IS_I965G(dev)) {
		s3_priv->saveDSPASURF = S3_READ(DSPASURF);
		s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
	}
	i915_save_palette(dev, PIPE_A);
	s3_priv->savePIPEASTAT = S3_READ(I915REG_PIPEASTAT);

	/*
	 * Pipe & plane B info
	 */
	s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
	s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
	s3_priv->saveFPB0 = S3_READ(FPB0);
	s3_priv->saveFPB1 = S3_READ(FPB1);
	s3_priv->saveDPLL_B = S3_READ(DPLL_B);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
	s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
	s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
	s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
	s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
	s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
	s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
	s3_priv->saveBCLRPAT_B = S3_READ(BCLRPAT_B);

	s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
	s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
	s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
	s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
	s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
	if (IS_I965GM(dev) || IS_GM45(dev)) {
		s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
		s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
	}
	i915_save_palette(dev, PIPE_B);
	s3_priv->savePIPEBSTAT = S3_READ(I915REG_PIPEBSTAT);

	/*
	 * CRT state
	 */
	s3_priv->saveADPA = S3_READ(ADPA);

	/*
	 * LVDS state
	 */
	s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
	s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
	s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
	if (IS_I965G(dev))
		s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		s3_priv->saveLVDS = S3_READ(LVDS);
	if (!IS_I830(dev) && !IS_845G(dev))
		s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
	s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
	s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
	s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);

	/* FIXME: save TV & SDVO state */

	/* FBC state */
	s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
	s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
	s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
	s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);

	/* Interrupt state */
	s3_priv->saveIIR = S3_READ(I915REG_INT_IDENTITY_R);
	s3_priv->saveIER = S3_READ(I915REG_INT_ENABLE_R);
	s3_priv->saveIMR = S3_READ(I915REG_INT_MASK_R);

	/* VGA state */
	s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
	s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
	s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
	s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);

	/* Clock gating state */
	s3_priv->saveD_STATE = S3_READ(D_STATE);
	s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);

	/* Cache mode state */
	s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);

	/* Memory arbitration state */
	s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
		s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));

	i915_save_vga(dev);

	/*
	 * Save page table control register
	 */
	s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);

	(void) pci_config_teardown(&conf_hdl);

	return (DDI_SUCCESS);
}

/*
 * This function checks the length of the memory mapped IO space to find
 * the right BAR. There are two possibilities here:
 * 1. The MMIO registers are in a memory mapped IO BAR of 1M size. The
 *    bottom half of the 1M space is the MMIO registers.
 * 2. The MMIO registers are in a memory mapped IO BAR of 512K size. The
 *    whole 512K space is the MMIO registers.
 */
static int
i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
{
	int rnumber;
	int nregs;
	off_t size = 0;

	if (ddi_dev_nregs(dip, &nregs)) {
		cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
		return (DDI_FAILURE);
	}

	for (rnumber = 1; rnumber < nregs; rnumber++) {
		(void) ddi_dev_regsize(dip, rnumber, &size);
		if ((size == 0x80000) ||
		    (size == 0x100000) ||
		    (size == 0x400000))
			break;
	}

	if (rnumber >= nregs) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to find MMIO registers");
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, rnumber, save_addr,
	    0, 0x80000, &s3_attr, handlep)) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to map bar %d", rnumber);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
i915_unmap_regs(ddi_acc_handle_t *handlep)
{
	ddi_regs_map_free(handlep);
}
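
/*
 * Autoconfiguration entry points.  i915_attach() builds an instance in
 * this order: allocate the soft state, allocate the S3 private area, map
 * the MMIO BAR (used by the register save/restore code above), register
 * with the generic drm misc module through drm_supp_register(), match
 * the device against i915_pciidlist with drm_probe(), and finally call
 * the common drm_attach() code.  The err_exit labels unwind those steps
 * in reverse order on failure.
 */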
static int
i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	drm_device_t		*statep;
	s3_i915_private_t	*s3_private;
	void			*handle;
	int			unit;

	unit = ddi_get_instance(dip);
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		statep = ddi_get_soft_state(i915_statep, unit);
		return (i915_resume(statep));
	default:
		DRM_ERROR("i915_attach: "
		    "only attach and resume ops are supported");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "i915_attach: failed to alloc softstate");
		return (DDI_FAILURE);
	}
	statep = ddi_get_soft_state(i915_statep, unit);
	statep->dip = dip;
	statep->driver = &i915_driver;

	statep->s3_private = drm_alloc(sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	if (statep->s3_private == NULL) {
		cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
		goto err_exit1;
	}

	/*
	 * Map in the mmio register space for s3.
	 */
	s3_private = (s3_i915_private_t *)statep->s3_private;

	if (i915_map_regs(dip, &s3_private->saveAddr,
	    &s3_private->saveHandle)) {
		cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
		goto err_exit2;
	}

	/*
	 * Call drm_supp_register to create minor nodes for us
	 */
	handle = drm_supp_register(dip, statep);
	if (handle == NULL) {
		DRM_ERROR("i915_attach: drm_supp_register failed");
		goto err_exit3;
	}
	statep->drm_handle = handle;

	/*
	 * After drm_supp_register, we can call drm_xxx routines
	 */
	statep->drm_supported = DRM_UNSUPPORT;
	if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: "
		    "DRM currently doesn't support this graphics card");
		goto err_exit4;
	}
	statep->drm_supported = DRM_SUPPORT;

	/* call common attach code */
	if (drm_attach(statep) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: drm_attach failed");
		goto err_exit4;
	}
	return (DDI_SUCCESS);
err_exit4:
	(void) drm_supp_unregister(handle);
err_exit3:
	i915_unmap_regs(&s3_private->saveHandle);
err_exit2:
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);
err_exit1:
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_FAILURE);

} /* i915_attach() */

static int
i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	drm_device_t		*statep;
	int			unit;
	s3_i915_private_t	*s3_private;

	if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
		DRM_ERROR("i915_detach: "
		    "only detach and suspend ops are supported");
		return (DDI_FAILURE);
	}

	unit = ddi_get_instance(dip);
	statep = ddi_get_soft_state(i915_statep, unit);
	if (statep == NULL) {
		DRM_ERROR("i915_detach: can not get soft state");
		return (DDI_FAILURE);
	}

	if (cmd == DDI_SUSPEND)
		return (i915_suspend(statep));

	s3_private = (s3_i915_private_t *)statep->s3_private;
	ddi_regs_map_free(&s3_private->saveHandle);

	/*
	 * Free the struct for context saving in S3
	 */
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	(void) drm_detach(statep);
	(void) drm_supp_unregister(statep->drm_handle);
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_SUCCESS);

} /* i915_detach() */


/*ARGSUSED*/
static int
i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	drm_device_t	*statep;
	int		error = DDI_SUCCESS;
	int		unit;

	unit = drm_dev_to_instance((dev_t)arg);
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		statep = ddi_get_soft_state(i915_statep, unit);
		if (statep == NULL || statep->dip == NULL) {
			error = DDI_FAILURE;
		} else {
			*result = (void *)statep->dip;
			error = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)unit;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
		break;
	}
	return (error);

} /* i915_info() */
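
/*
 * i915_configure() fills in the drm_driver_t hooks that the generic drm
 * misc module calls back into: load/unload, preclose/lastclose, AGP
 * detection, vblank waits, the IRQ install/uninstall/handler path, the
 * i915 ioctl table and the driver name/version strings.
 */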
static void
i915_configure(drm_driver_t *driver)
{
	i915_init_ioctl_arrays();

	driver->buf_priv_size = 1;	/* No dev_priv */
	driver->load = i915_driver_load;
	driver->unload = i915_driver_unload;
	driver->preclose = i915_driver_preclose;
	driver->lastclose = i915_driver_lastclose;
	driver->device_is_agp = i915_driver_device_is_agp;
	driver->vblank_wait = i915_driver_vblank_wait;
	driver->vblank_wait2 = i915_driver_vblank_wait2;
	driver->irq_preinstall = i915_driver_irq_preinstall;
	driver->irq_postinstall = i915_driver_irq_postinstall;
	driver->irq_uninstall = i915_driver_irq_uninstall;
	driver->irq_handler = i915_driver_irq_handler;

	driver->driver_ioctls = i915_ioctls;
	driver->max_driver_ioctl = i915_max_ioctl;

	driver->driver_name = DRIVER_NAME;
	driver->driver_desc = DRIVER_DESC;
	driver->driver_date = DRIVER_DATE;
	driver->driver_major = DRIVER_MAJOR;
	driver->driver_minor = DRIVER_MINOR;
	driver->driver_patchlevel = DRIVER_PATCHLEVEL;

	driver->use_agp = 1;
	driver->require_agp = 1;
	driver->use_irq = 1;
	driver->use_vbl_irq = 1;
	driver->use_vbl_irq2 = 1;
}