/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/sunldi.h>
#include <sys/pci.h>
#include <sys/agpgart.h>
#include <sys/agp/agpdefs.h>
#include <sys/agp/agptarget_io.h>

int agptarget_debug_var = 0;
#define TARGETDB_PRINT2(fmt)    if (agptarget_debug_var >= 1) cmn_err fmt
#define INST2NODENUM(inst)      (inst)
#define DEV2INST(dev)           (getminor(dev))

static ddi_device_acc_attr_t dev_attr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC,
};

static struct _i9xx_private_compat {
        uint64_t physical;      /* physical address */
        uint_t size;            /* size of mapping */
        uint_t regnum;          /* register number */
        caddr_t flush_page;     /* kernel virtual address */
        ddi_acc_handle_t handle; /* data access handle */
        uint_t ra_alloced;
} i9xx_private = {0, 0, 0, 0, 0, 0};

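/*
 * Config-space offsets of the chipset flush page address registers that
 * intel_chipset_flush_setup() programs below (i915-class and i965-class
 * bridges, respectively).
 */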
#define I915_IFPADDR    0x60
#define I965_IFPADDR    0x70

#define HIADDR(n)       ((uint32_t)(((uint64_t)(n) & \
                        0xFFFFFFFF00000000ULL) >> 32))
#define LOADDR(n)       ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))

/*
 * Used by GEM to flush the chipset global
 * write buffers on certain Intel chipsets.
 */

static void
intel_chipset_flush_setup(dev_info_t *dip,
    ddi_acc_handle_t pci_acc_hdl,
    int gms_off);

typedef struct agp_target_softstate {
        dev_info_t              *tsoft_dip;
        ddi_acc_handle_t        tsoft_pcihdl;
        uint32_t                tsoft_devid;
        /* The offset of the ACAPID register */
        off_t                   tsoft_acaptr;
        kmutex_t                tsoft_lock;
        int                     tsoft_gms_off;  /* GMS offset in config */
        uint32_t                tsoft_gms;
} agp_target_softstate_t;

/*
 * To get the pre-allocated graphics mem size using the Graphics Mode Select
 * (GMS) value.
 */
typedef struct gms_mode {
        uint32_t        gm_devid;       /* bridge vendor + device id */
        off_t           gm_regoff;      /* mode selection register offset */
        uint32_t        gm_mask;        /* GMS mask */
        uint32_t        gm_num;         /* number of modes in gm_vec */
        int             *gm_vec;        /* modes array */
} gms_mode_t;

static void *agptarget_glob_soft_handle;

#define GETSOFTC(instance)      ((agp_target_softstate_t *) \
    ddi_get_soft_state(agptarget_glob_soft_handle, instance))

/*
 * The AMD8151 bridge is the only supported 64-bit hardware
 */
static int
is_64bit_aper(agp_target_softstate_t *softstate)
{
        return (softstate->tsoft_devid == AMD_BR_8151);
}

/*
 * Check if it is an Intel bridge
 */
static int
is_intel_br(agp_target_softstate_t *softstate)
{
        return ((softstate->tsoft_devid & VENDOR_ID_MASK) ==
            INTEL_VENDOR_ID);
}

/*
 * agp_target_cap_find()
 *
 * Description:
 *      This function searches the linked capability list to find the offset
 *      of the AGP capability register. If it is not found, 0 is returned.
 *      This works for standard AGP chipsets, but not for some Intel chipsets,
 *      like the I830M/I830MP/I852PM/I852GME/I855GME. It will return 0 for
 *      these chipsets even if AGP is supported. So the offset of acapid
 *      should be set manually in those cases.
 *
 * Arguments:
 *      pci_handle              ddi acc handle of pci config
 *
 * Returns:
 *      0                       No capability pointer register found
 *      nextcap                 The AGP capability pointer register offset
 */
static off_t
agp_target_cap_find(ddi_acc_handle_t pci_handle)
{
        off_t           nextcap = 0;
        uint32_t        ncapid = 0;
        uint8_t         value = 0;

        /* Check if this device supports the capability pointer */
        value = (uint8_t)(pci_config_get16(pci_handle, PCI_CONF_STAT)
            & PCI_CONF_CAP_MASK);

        if (!value)
                return (0);
        /* Get the offset of the first capability pointer from CAPPTR */
        nextcap = (off_t)(pci_config_get8(pci_handle, AGP_CONF_CAPPTR));

        /* Check the AGP capability from the first capability pointer */
        while (nextcap) {
                ncapid = pci_config_get32(pci_handle, nextcap);
                /*
                 * AGP 3.0 rev 1.0, page 127: the cap ID is assigned by the
                 * PCI SIG; see also the 845 data sheet, page 69.
                 */
                if ((ncapid & PCI_CONF_CAPID_MASK) ==
                    AGP_CAP_ID) /* The AGP cap was found */
                        break;

                nextcap = (off_t)((ncapid & PCI_CONF_NCAPID_MASK) >> 8);
        }

        return (nextcap);
}

/*
 * agp_target_get_apbase()
 *
 * Description:
 *      This function gets the AGP aperture base address from the AGP target
 *      register; the AGP aperture base register is programmed by the BIOS.
 *
 * Arguments:
 *      softstate               driver soft state pointer
 *
 * Returns:
 *      aper_base               AGP aperture base address
 *
 * Notes:
 *      If a 64-bit bridge device is available, the AGP aperture base address
 *      can be 64 bits wide.
 */
static uint64_t
agp_target_get_apbase(agp_target_softstate_t *softstate)
{
        uint64_t aper_base;

        if (is_intel_br(softstate)) {
                aper_base = pci_config_get32(softstate->tsoft_pcihdl,
                    AGP_CONF_APERBASE) & AGP_32_APERBASE_MASK;
        } else if (is_64bit_aper(softstate)) {
                aper_base = pci_config_get64(softstate->tsoft_pcihdl,
                    AGP_CONF_APERBASE);
                /* 32-bit or 64-bit aperture base pointer */
                if ((aper_base & AGP_APER_TYPE_MASK) == 0)
                        aper_base &= AGP_32_APERBASE_MASK;
                else
                        aper_base &= AGP_64_APERBASE_MASK;
        }

        return (aper_base);
}

/*
 * agp_target_get_apsize()
 *
 * Description:
 *      This function gets the AGP aperture size by reading the AGP aperture
 *      size register.
 * Arguments:
 *      softstate               driver soft state pointer
 *
 * Return:
 *      size            The AGP aperture size in megabytes
 *      0               an unexpected error
 */
static size_t
agp_target_get_apsize(agp_target_softstate_t *softstate)
{
        off_t cap;
        uint16_t value;
        size_t size, regsize;

        ASSERT(softstate->tsoft_acaptr);
        cap = softstate->tsoft_acaptr;

        if (is_intel_br(softstate)) {
                /* extend this value to 16 bits for later tests */
                value = (uint16_t)pci_config_get8(softstate->tsoft_pcihdl,
                    cap + AGP_CONF_APERSIZE) | AGP_APER_SIZE_MASK;
        } else if (is_64bit_aper(softstate)) {
                value = pci_config_get16(softstate->tsoft_pcihdl,
                    cap + AGP_CONF_APERSIZE);
        }

        if (value & AGP_APER_128M_MASK) {
                switch (value & AGP_APER_128M_MASK) {
                case AGP_APER_4M:
                        size = 4;       /* 4M */
                        break;
                case AGP_APER_8M:
                        size = 8;       /* 8M */
                        break;
                case AGP_APER_16M:
                        size = 16;      /* 16M */
                        break;
                case AGP_APER_32M:
                        size = 32;      /* 32M */
                        break;
                case AGP_APER_64M:
                        size = 64;      /* 64M */
                        break;
                case AGP_APER_128M:
                        size = 128;     /* 128M */
                        break;
                default:
                        size = 0;       /* not true */
                }
        } else {
                switch (value & AGP_APER_4G_MASK) {
                case AGP_APER_256M:
                        size = 256;     /* 256M */
                        break;
                case AGP_APER_512M:
                        size = 512;     /* 512M */
                        break;
                case AGP_APER_1024M:
                        size = 1024;    /* 1024M */
                        break;
                case AGP_APER_2048M:
                        size = 2048;    /* 2048M */
                        break;
                case AGP_APER_4G:
                        size = 4096;    /* 4096M */
                        break;
                default:
                        size = 0;       /* not true */
                }
        }
        /*
         * In some cases, there is no APSIZE register, so the size value
         * of 256M could be wrong. Check the value by reading the size of
         * the first register which was set in the PCI configuration space.
         */
        if (size == 256) {
                if (ddi_dev_regsize(softstate->tsoft_dip,
                    AGP_TARGET_BAR1, (off_t *)&regsize) == DDI_FAILURE)
                        return (0);

                if (MB2BYTES(size) != regsize) {
                        TARGETDB_PRINT2((CE_WARN,
                            "APSIZE 256M doesn't match regsize %lx",
                            regsize));
                        TARGETDB_PRINT2((CE_WARN, "Use regsize instead"));
                        size = BYTES2MB(regsize);
                }
        }

        return (size);
}

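/*
 * Program the GART table base address into the bridge: the GTLB is disabled
 * first, then the table address is written to the ATTBASE register.
 */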
static void
agp_target_set_gartaddr(agp_target_softstate_t *softstate, uint32_t gartaddr)
{
        ASSERT(softstate->tsoft_acaptr);

        /* Disable the GTLB for Intel chipsets */
        pci_config_put16(softstate->tsoft_pcihdl,
            softstate->tsoft_acaptr + AGP_CONF_CONTROL, 0x0000);

        pci_config_put32(softstate->tsoft_pcihdl,
            softstate->tsoft_acaptr + AGP_CONF_ATTBASE,
            gartaddr & AGP_ATTBASE_MASK);
}

/*
 * Pre-allocated graphics memory for every type of Intel north bridge; the
 * sizes are specified in kilobytes.
 */
#define GMS_MB(n)       ((n) * 1024)
#define GMS_SHIFT       4
#define GMS_SIZE(a)     (sizeof (a) / sizeof (int))

/*
 * Since the value zero always means "no memory pre-allocated", (GMS - 1) is
 * used to index these arrays; i.e. gms_xxx[0] contains the mem size (in KB)
 * that GMS value 0x1 corresponds to.
 *
 * All "reserved" GMS values are assumed to mean zero bytes of pre-allocated
 * graphics memory, unless some special BIOS settings exist.
 */
static int gms_810[12] = {0, 0, 0, 0, 0, 0, 0, 512, 0, 0, 0, GMS_MB(1)};
static int gms_830_845[4] = {0, 512, GMS_MB(1), GMS_MB(8)};
static int gms_855GM[5] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
        GMS_MB(32)};
/* There are no modes for 16M in the data sheet, but some BIOSes add it. */
static int gms_865_915GM[4] = {GMS_MB(1), 0, GMS_MB(8), GMS_MB(16)};
static int gms_915_945_965[3] = {GMS_MB(1), 0, GMS_MB(8)};
static int gms_965GM[7] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
        GMS_MB(32), GMS_MB(48), GMS_MB(64)};
static int gms_X33[9] = {GMS_MB(1), GMS_MB(4), GMS_MB(8), GMS_MB(16),
        GMS_MB(32), GMS_MB(48), GMS_MB(64), GMS_MB(128), GMS_MB(256)};
static int gms_G4X[13] = {0, 0, 0, 0,
        GMS_MB(32), GMS_MB(48), GMS_MB(64), GMS_MB(128), GMS_MB(256),
        GMS_MB(96), GMS_MB(160), GMS_MB(224), GMS_MB(352)};

static gms_mode_t gms_modes[] = {
        {INTEL_BR_810, I810_CONF_SMRAM, I810_GMS_MASK,
            GMS_SIZE(gms_810), gms_810},
        {INTEL_BR_810DC, I810_CONF_SMRAM, I810_GMS_MASK,
            GMS_SIZE(gms_810), gms_810},
        {INTEL_BR_810E, I810_CONF_SMRAM, I810_GMS_MASK,
            GMS_SIZE(gms_810), gms_810},
        {INTEL_BR_830M, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_830_845), gms_830_845},
        {INTEL_BR_845, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_830_845), gms_830_845},
        {INTEL_BR_855GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_855GM), gms_855GM},
        {INTEL_BR_865, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_865_915GM), gms_865_915GM},
        {INTEL_BR_915GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_865_915GM), gms_865_915GM},
        {INTEL_BR_915, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_945, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_945GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_945GME, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_946GZ, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_965G1, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_965G2, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_965Q, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_915_945_965), gms_915_945_965},
        {INTEL_BR_965GM, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_965GM), gms_965GM},
        {INTEL_BR_965GME, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_965GM), gms_965GM},
        {INTEL_BR_Q35, I8XX_CONF_GC, IX33_GC_MODE_MASK,
            GMS_SIZE(gms_X33), gms_X33},
        {INTEL_BR_G33, I8XX_CONF_GC, IX33_GC_MODE_MASK,
            GMS_SIZE(gms_X33), gms_X33},
        {INTEL_BR_Q33, I8XX_CONF_GC, IX33_GC_MODE_MASK,
            GMS_SIZE(gms_X33), gms_X33},
        {INTEL_BR_GM45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_965GM), gms_965GM},
        {INTEL_BR_EL, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_Q45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_G45, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_G41, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_IGDNG_D, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_IGDNG_M, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_IGDNG_MA, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_IGDNG_MC2, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X},
        {INTEL_BR_B43, I8XX_CONF_GC, I8XX_GC_MODE_MASK,
            GMS_SIZE(gms_G4X), gms_G4X}
};

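/*
 * Look up the gms_modes[] entry for a bridge device id.
 * Returns the table index, or -1 if the device is not supported.
 */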
static int
get_chip_gms(uint32_t devid)
{
        int num_modes;
        int i;

        num_modes = (sizeof (gms_modes) / sizeof (gms_mode_t));

        for (i = 0; i < num_modes; i++) {
                if (gms_modes[i].gm_devid == devid)
                        break;
        }

        return ((i == num_modes) ? -1 : i);
}

/* Returns the size (in KB) of pre-allocated graphics memory */
static size_t
i8xx_biosmem_detect(agp_target_softstate_t *softstate)
{
        uint8_t memval;
        size_t kbytes;
        int gms_off;

        kbytes = 0;
        gms_off = softstate->tsoft_gms_off;

        /* fetch the GMS value from the DRAM controller */
        memval = pci_config_get8(softstate->tsoft_pcihdl,
            gms_modes[gms_off].gm_regoff);
        TARGETDB_PRINT2((CE_NOTE, "i8xx_biosmem_detect: memval = %x", memval));
        memval = (memval & gms_modes[gms_off].gm_mask) >> GMS_SHIFT;
        /* assume zero bytes for 0 or "reserved" GMS values */
        if (memval == 0 || memval > gms_modes[gms_off].gm_num) {
                TARGETDB_PRINT2((CE_WARN, "i8xx_biosmem_detect: "
                    "devid = %x, GMS = %x. assuming zero bytes of "
                    "pre-allocated memory",
                    gms_modes[gms_off].gm_devid, memval));
                goto done;
        }
        memval--;       /* use (GMS value - 1) as the index */
        kbytes = (gms_modes[gms_off].gm_vec)[memval];

done:
        TARGETDB_PRINT2((CE_NOTE,
            "i8xx_biosmem_detect: %ldKB BIOS pre-allocated memory detected",
            kbytes));
        return (kbytes);
}

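/*
 * getinfo(9E) entry point: translate a dev_t into the corresponding
 * dev_info_t pointer or instance number.
 */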
/*ARGSUSED*/
static int agptarget_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
    void *arg, void **resultp)
{
        agp_target_softstate_t *st;
        int instance, rval = DDI_FAILURE;
        dev_t dev;

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
                dev = (dev_t)arg;
                instance = DEV2INST(dev);
                st = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
                if (st != NULL) {
                        mutex_enter(&st->tsoft_lock);
                        *resultp = st->tsoft_dip;
                        mutex_exit(&st->tsoft_lock);
                        rval = DDI_SUCCESS;
                } else
                        *resultp = NULL;

                break;
        case DDI_INFO_DEVT2INSTANCE:
                dev = (dev_t)arg;
                instance = DEV2INST(dev);
                *resultp = (void *)(uintptr_t)instance;
                rval = DDI_SUCCESS;
                break;
        default:
                break;
        }

        return (rval);
}

static int
intel_br_resume(agp_target_softstate_t *softstate)
{
        int gms_off;

        gms_off = softstate->tsoft_gms_off;

        /*
         * Restore the GMCH graphics control register saved at suspend time.
         */
        pci_config_put16(softstate->tsoft_pcihdl,
            gms_modes[gms_off].gm_regoff, softstate->tsoft_gms);

        return (DDI_SUCCESS);
}

static int
intel_br_suspend(agp_target_softstate_t *softstate)
{
        int gms_off;

        gms_off = softstate->tsoft_gms_off;
        softstate->tsoft_gms = pci_config_get16(softstate->tsoft_pcihdl,
            gms_modes[gms_off].gm_regoff);

        return (DDI_SUCCESS);
}

static void
intel_chipset_flush_setup(dev_info_t *dip,
    ddi_acc_handle_t pci_acc_hdl, int gms_off)
{
        uint32_t temp_hi, temp_lo;
        uint64_t phys_base, phys_len;
        uint32_t phys_hi_mask = 0;
        pci_regspec_t *old_regs = NULL, *new_regs = NULL;
        int old_len = 0, new_len = 0;
        uint32_t old_regnum, new_regnum;
        int circular = 0, prop_updated = 0;
        int ret;

        if (i9xx_private.handle)
                return;

        /* IS_I965 || IS_G33 || IS_G4X */
        if (gms_off > 11) {
                temp_hi = pci_config_get32(pci_acc_hdl, I965_IFPADDR + 4);
                temp_lo = pci_config_get32(pci_acc_hdl, I965_IFPADDR);
                phys_hi_mask |= PCI_ADDR_MEM64 | I965_IFPADDR;
        } else {
                temp_hi = 0;
                temp_lo = pci_config_get32(pci_acc_hdl, I915_IFPADDR);
                phys_hi_mask |= PCI_ADDR_MEM32 | I915_IFPADDR;
        }

        if (!(temp_lo & 0x1)) {
                ndi_ra_request_t request;

                bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
                request.ra_flags |= NDI_RA_ALIGN_SIZE | NDI_RA_ALLOC_BOUNDED;
                request.ra_boundbase = 0;
                request.ra_boundlen = 0xffffffff;
                request.ra_len = AGP_PAGE_SIZE;

                /* allocate space from the allocator */
                ndi_devi_enter(ddi_get_parent(dip), &circular);
                if (ndi_ra_alloc(ddi_get_parent(dip), &request, &phys_base,
                    &phys_len, NDI_RA_TYPE_MEM, NDI_RA_PASS) != NDI_SUCCESS) {
                        TARGETDB_PRINT2((CE_WARN,
                            "intel_chipset_flush_setup: "
                            "ndi_ra_alloc failed!"));
                        ndi_devi_exit(ddi_get_parent(dip), circular);
                        goto error;
                }
                ndi_devi_exit(ddi_get_parent(dip), circular);
                i9xx_private.ra_alloced = 1;

                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: "
                    "addr = 0x%x.0x%x len [0x%x]\n",
                    HIADDR(phys_base), LOADDR(phys_base),
                    (uint32_t)phys_len));

                if (gms_off > 11) {
                        pci_config_put32(pci_acc_hdl, I965_IFPADDR + 4,
                            HIADDR(phys_base));
                        pci_config_put32(pci_acc_hdl, I965_IFPADDR,
                            LOADDR(phys_base) | 0x1);
                } else {
                        pci_config_put32(pci_acc_hdl, I915_IFPADDR,
                            LOADDR(phys_base) | 0x1);
                }
        } else {
                temp_lo &= ~0x1;
                phys_base = ((uint64_t)temp_hi << 32) | temp_lo;
        }

        temp_hi = pci_config_get32(pci_acc_hdl, I965_IFPADDR + 4);
        temp_lo = pci_config_get32(pci_acc_hdl, I965_IFPADDR);

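        /*
         * Expose the flush page through this node's "reg" property: append
         * one more entry describing it, then map that entry with
         * ddi_regs_map_setup(). i9xx_private.regnum records which entry was
         * added so intel_chipset_flush_free() can remove it again.
         */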
        /* set pci props */
        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", (caddr_t)&old_regs, &old_len) != DDI_PROP_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: "
                    "ddi_getlongprop(1) failed!"));
                goto error;
        }

        old_regnum = old_len / sizeof (pci_regspec_t);
        TARGETDB_PRINT2((CE_WARN,
            "intel_chipset_flush_setup: old_regnum = %d", old_regnum));

        new_regnum = old_regnum + 1;
        new_len = new_regnum * sizeof (pci_regspec_t);
        new_regs = kmem_zalloc(new_len, KM_SLEEP);
        if (memcpy(new_regs, old_regs, (size_t)old_len) == NULL) {
                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: memcpy failed"));
                goto error;
        }

        /* Bus=0, Dev=0, Func=0 0x82001000 */
        new_regs[old_regnum].pci_phys_hi = PCI_REG_REL_M | phys_hi_mask;
        new_regs[old_regnum].pci_phys_mid = HIADDR(phys_base);
        new_regs[old_regnum].pci_phys_low = LOADDR(phys_base);
        new_regs[old_regnum].pci_size_hi = 0x00000000;
        new_regs[old_regnum].pci_size_low = AGP_PAGE_SIZE;

        ret = ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
            "reg", (int *)new_regs, (uint_t)5 * new_regnum);
        if (ret != DDI_PROP_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: "
                    "ndi_prop_update_int_array failed %d", ret));
                goto error;
        }
        kmem_free(new_regs, (size_t)new_len);
        new_regs = NULL;

        prop_updated = 1;

        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", (caddr_t)&new_regs, &new_len) != DDI_PROP_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: "
                    "ddi_getlongprop(2) failed"));
                goto error;
        }
        kmem_free(new_regs, (size_t)new_len);
        new_regs = NULL;

        new_regnum = new_len / sizeof (pci_regspec_t);

        i9xx_private.physical = phys_base;
        i9xx_private.size = AGP_PAGE_SIZE;
        i9xx_private.regnum = new_regnum - 1;

        ret = ddi_regs_map_setup(dip, i9xx_private.regnum,
            (caddr_t *)&(i9xx_private.flush_page), 0,
            i9xx_private.size, &dev_attr, &i9xx_private.handle);
        if (ret != DDI_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN,
                    "intel_chipset_flush_setup: ddi_regs_map_setup failed"));
                i9xx_private.handle = NULL;
                goto error;
        }

        kmem_free(old_regs, (size_t)old_len);
        return;
error:
        if (prop_updated)
                (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
                    "reg", (int *)old_regs, (uint_t)5 * old_regnum);
        if (new_regs)
                kmem_free(new_regs, (size_t)new_len);
        if (old_regs)
                kmem_free(old_regs, (size_t)old_len);
        if (i9xx_private.ra_alloced) {
                ndi_devi_enter(ddi_get_parent(dip), &circular);
                (void) ndi_ra_free(ddi_get_parent(dip),
                    phys_base, phys_len, NDI_RA_TYPE_MEM, NDI_RA_PASS);
                ndi_devi_exit(ddi_get_parent(dip), circular);
                i9xx_private.ra_alloced = 0;
        }
}

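/*
 * Undo intel_chipset_flush_setup(): unmap the flush page, drop the extra
 * "reg" entry added there, and release the PCI memory range if this driver
 * allocated it.
 */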
static void
intel_chipset_flush_free(dev_info_t *dip)
{
        pci_regspec_t *regs = NULL;
        int len = 0, regnum;
        int circular = 0;

        if (i9xx_private.handle == NULL)
                return;

        ddi_regs_map_free(&i9xx_private.handle);
        i9xx_private.handle = NULL;

        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", (caddr_t)&regs, &len) == DDI_PROP_SUCCESS) {
                regnum = len / sizeof (pci_regspec_t) - 1;
                (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
                    "reg", (int *)regs, (uint_t)5 * regnum);
        }
        if (regs)
                kmem_free(regs, (size_t)len);

        if (i9xx_private.ra_alloced) {
                ndi_devi_enter(ddi_get_parent(dip), &circular);
                (void) ndi_ra_free(ddi_get_parent(dip),
                    i9xx_private.physical, i9xx_private.size,
                    NDI_RA_TYPE_MEM, NDI_RA_PASS);
                ndi_devi_exit(ddi_get_parent(dip), circular);
                i9xx_private.ra_alloced = 0;
        }
}

static int
agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        agp_target_softstate_t *softstate;
        int instance;
        int status;

        instance = ddi_get_instance(dip);

        switch (cmd) {
        case DDI_ATTACH:
                break;
        case DDI_RESUME:
                softstate =
                    ddi_get_soft_state(agptarget_glob_soft_handle, instance);
                return (intel_br_resume(softstate));
        default:
                TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                    "only attach and resume ops are supported"));
                return (DDI_FAILURE);
        }

        if (ddi_soft_state_zalloc(agptarget_glob_soft_handle,
            instance) != DDI_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                    "soft state zalloc failed"));
                return (DDI_FAILURE);
        }

        softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
        mutex_init(&softstate->tsoft_lock, NULL, MUTEX_DRIVER, NULL);
        softstate->tsoft_dip = dip;
        status = pci_config_setup(dip, &softstate->tsoft_pcihdl);
        if (status != DDI_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                    "pci config setup failed"));
                ddi_soft_state_free(agptarget_glob_soft_handle,
                    instance);
                return (DDI_FAILURE);
        }

        softstate->tsoft_devid = pci_config_get32(softstate->tsoft_pcihdl,
            PCI_CONF_VENID);
        softstate->tsoft_gms_off = get_chip_gms(softstate->tsoft_devid);
        if (softstate->tsoft_gms_off < 0) {
                TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                    "read gms offset failed"));
                pci_config_teardown(&softstate->tsoft_pcihdl);
                ddi_soft_state_free(agptarget_glob_soft_handle,
                    instance);
                return (DDI_FAILURE);
        }
        softstate->tsoft_acaptr = agp_target_cap_find(softstate->tsoft_pcihdl);
        if (softstate->tsoft_acaptr == 0) {
                /* Make a correction for some Intel chipsets */
                if (is_intel_br(softstate))
                        softstate->tsoft_acaptr = AGP_CAP_OFF_DEF;
                else {
                        TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                            "no AGP capability found for this bridge"));
                        pci_config_teardown(&softstate->tsoft_pcihdl);
                        ddi_soft_state_free(agptarget_glob_soft_handle,
                            instance);
                        return (DDI_FAILURE);
                }
        }

        status = ddi_create_minor_node(dip, AGPTARGET_NAME, S_IFCHR,
            INST2NODENUM(instance), DDI_NT_AGP_TARGET, 0);

        if (status != DDI_SUCCESS) {
                TARGETDB_PRINT2((CE_WARN, "agp_target_attach: "
                    "Create minor node failed"));
                pci_config_teardown(&softstate->tsoft_pcihdl);
                ddi_soft_state_free(agptarget_glob_soft_handle, instance);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
agp_target_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int instance;
        agp_target_softstate_t *softstate;

        instance = ddi_get_instance(dip);
        softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);

        if (cmd == DDI_SUSPEND) {
                /* save the graphics control register across suspend */
                return (intel_br_suspend(softstate));
        }

        if (cmd != DDI_DETACH) {
                TARGETDB_PRINT2((CE_WARN, "agp_target_detach: "
                    "only detach and suspend ops are supported"));
                return (DDI_FAILURE);
        }

        ddi_remove_minor_node(dip, AGPTARGET_NAME);
        pci_config_teardown(&softstate->tsoft_pcihdl);
        mutex_destroy(&softstate->tsoft_lock);
        ddi_soft_state_free(agptarget_glob_soft_handle, instance);
        return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
agp_target_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *cred, int *rval)
{
        int instance = DEV2INST(dev);
        agp_target_softstate_t *st;
        static char kernel_only[] =
            "agp_target_ioctl: this is a kernel-only ioctl";

        if (!(mode & FKIOCTL)) {
                TARGETDB_PRINT2((CE_CONT, kernel_only));
                return (ENXIO);
        }
        st = GETSOFTC(instance);

        if (st == NULL)
                return (ENXIO);

        mutex_enter(&st->tsoft_lock);

        switch (cmd) {
        case CHIP_DETECT:
        {
                int type = 0;

                if (is_intel_br(st))
                        type = CHIP_IS_INTEL;
                else if (is_64bit_aper(st))
                        type = CHIP_IS_AMD;
                else {
                        type = 0;
                        TARGETDB_PRINT2((CE_WARN, "Unknown bridge!"));
                }

                if (ddi_copyout(&type, (void *)data, sizeof (int), mode)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EFAULT);
                }

                break;
        }
        case I8XX_GET_PREALLOC_SIZE:
        {
                size_t prealloc_size;

                if (!is_intel_br(st)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EINVAL);
                }

                prealloc_size = i8xx_biosmem_detect(st);
                if (ddi_copyout(&prealloc_size, (void *)data,
                    sizeof (size_t), mode)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EFAULT);
                }

                break;
        }
        case AGP_TARGET_GETINFO:
        {
                i_agp_info_t info;
                uint32_t value;
                off_t cap;

                ASSERT(st->tsoft_acaptr);

                cap = st->tsoft_acaptr;
                value = pci_config_get32(st->tsoft_pcihdl, cap);
                info.iagp_ver.agpv_major = (uint16_t)((value >> 20) & 0xf);
                info.iagp_ver.agpv_minor = (uint16_t)((value >> 16) & 0xf);
                info.iagp_devid = st->tsoft_devid;
                info.iagp_mode = pci_config_get32(st->tsoft_pcihdl,
                    cap + AGP_CONF_STATUS);
                info.iagp_aperbase = agp_target_get_apbase(st);
                info.iagp_apersize = agp_target_get_apsize(st);

                if (ddi_copyout(&info, (void *)data,
                    sizeof (i_agp_info_t), mode)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EFAULT);
                }
                break;
        }
        /*
         * This ioctl is only for Intel AGP chipsets.
         * It is not necessary for the AMD8151 AGP bridge, because
         * this register in the AMD8151 does not control any hardware.
         * It is only provided for compatibility with an Intel AGP bridge.
         * Please refer to the AMD8151 data sheet, page 24,
         * "AGP device GART pointer".
         */
        case AGP_TARGET_SET_GATTADDR:
        {
                uint32_t gartaddr;

                if (ddi_copyin((void *)data, &gartaddr,
                    sizeof (uint32_t), mode)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EFAULT);
                }

                agp_target_set_gartaddr(st, gartaddr);
                break;
        }
        case AGP_TARGET_SETCMD:
        {
                uint32_t command;

                if (ddi_copyin((void *)data, &command,
                    sizeof (uint32_t), mode)) {
                        mutex_exit(&st->tsoft_lock);
                        return (EFAULT);
                }

                ASSERT(st->tsoft_acaptr);

                pci_config_put32(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_COMMAND,
                    command);
                break;
        }
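        /*
         * Flush the GART TLB by toggling the GTLB enable bit in the AGP
         * control register: clear it, then set it again.
         */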
        case AGP_TARGET_FLUSH_GTLB:
        {
                uint16_t value;

                ASSERT(st->tsoft_acaptr);

                value = pci_config_get16(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_CONTROL);
                value &= ~AGPCTRL_GTLBEN;
                pci_config_put16(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_CONTROL, value);
                value |= AGPCTRL_GTLBEN;
                pci_config_put16(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_CONTROL, value);

                break;
        }
        case AGP_TARGET_CONFIGURE:
        {
                uint8_t value;

                ASSERT(st->tsoft_acaptr);

                /*
                 * In Intel AGP bridges, the AGP misc register offset is
                 * indexed from 0 instead of from the capability register.
                 * AMD AGP bridges have no such misc register to control the
                 * aperture access; they have similar registers in the CPU
                 * GART devices instead.
                 */

                if (is_intel_br(st)) {
                        value = pci_config_get8(st->tsoft_pcihdl,
                            st->tsoft_acaptr + AGP_CONF_MISC);
                        value |= AGP_MISC_APEN;
                        pci_config_put8(st->tsoft_pcihdl,
                            st->tsoft_acaptr + AGP_CONF_MISC, value);
                }
                break;
        }
        case AGP_TARGET_UNCONFIG:
        {
                uint32_t value1;
                uint8_t value2;

                ASSERT(st->tsoft_acaptr);

                pci_config_put16(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_CONTROL, 0x0);

                if (is_intel_br(st)) {
                        value2 = pci_config_get8(st->tsoft_pcihdl,
                            st->tsoft_acaptr + AGP_CONF_MISC);
                        value2 &= ~AGP_MISC_APEN;
                        pci_config_put8(st->tsoft_pcihdl,
                            st->tsoft_acaptr + AGP_CONF_MISC, value2);
                }

                value1 = pci_config_get32(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_COMMAND);
                value1 &= ~AGPCMD_AGPEN;
                pci_config_put32(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_COMMAND,
                    value1);

                pci_config_put32(st->tsoft_pcihdl,
                    st->tsoft_acaptr + AGP_CONF_ATTBASE, 0x0);

                break;
        }

        case INTEL_CHIPSET_FLUSH_SETUP:
        {
                intel_chipset_flush_setup(st->tsoft_dip,
                    st->tsoft_pcihdl, st->tsoft_gms_off);
                break;
        }
        case INTEL_CHIPSET_FLUSH:
        {
                if (i9xx_private.handle != NULL)
                        ddi_put32(i9xx_private.handle,
                            (uint32_t *)(uintptr_t)i9xx_private.flush_page, 1);

                break;
        }
        case INTEL_CHIPSET_FLUSH_FREE:
        {
                intel_chipset_flush_free(st->tsoft_dip);
                break;
        }
        default:
                mutex_exit(&st->tsoft_lock);
                return (ENXIO);
        } /* end switch */

        mutex_exit(&st->tsoft_lock);

        return (0);
}

/*ARGSUSED*/
static int
agp_target_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
        int instance = DEV2INST(*devp);
        agp_target_softstate_t *st;

        if (!(flag & FKLYR))
                return (ENXIO);

        st = GETSOFTC(instance);

        if (st == NULL)
                return (ENXIO);

        return (0);
}

/*ARGSUSED*/
static int
agp_target_close(dev_t dev, int flag, int otyp, cred_t *cred)
{
        int instance = DEV2INST(dev);
        agp_target_softstate_t *st;

        st = GETSOFTC(instance);

        if (st == NULL)
                return (ENXIO);

        return (0);
}

static struct cb_ops agp_target_cb_ops = {
        agp_target_open,                /* cb_open */
        agp_target_close,               /* cb_close */
        nodev,                          /* cb_strategy */
        nodev,                          /* cb_print */
        nodev,                          /* cb_dump */
        nodev,                          /* cb_read() */
        nodev,                          /* cb_write() */
        agp_target_ioctl,               /* cb_ioctl */
        nodev,                          /* cb_devmap */
        nodev,                          /* cb_mmap */
        nodev,                          /* cb_segmap */
        nochpoll,                       /* cb_chpoll */
        ddi_prop_op,                    /* cb_prop_op */
        0,                              /* cb_stream */
        D_NEW | D_MP,                   /* cb_flag */
        CB_REV,                         /* cb_rev */
        nodev,                          /* cb_aread() */
        nodev,                          /* cb_awrite() */
};

/* device operations */
static struct dev_ops agp_target_ops = {
        DEVO_REV,               /* devo_rev */
        0,                      /* devo_refcnt */
        agptarget_getinfo,      /* devo_getinfo */
        nulldev,                /* devo_identify */
        nulldev,                /* devo_probe */
        agp_target_attach,      /* devo_attach */
        agp_target_detach,      /* devo_detach */
        nodev,                  /* devo_reset */
        &agp_target_cb_ops,     /* devo_cb_ops */
        0,                      /* devo_bus_ops */
        0,                      /* devo_power */
        ddi_quiesce_not_needed, /* devo_quiesce */
};

static struct modldrv modldrv = {
        &mod_driverops,
        "AGP target driver",
        &agp_target_ops,
};

static struct modlinkage modlinkage = {
        MODREV_1,               /* MODREV_1 is indicated by the manual */
        {&modldrv, NULL, NULL, NULL}
};

int
_init(void)
{
        int ret;

        ret = ddi_soft_state_init(&agptarget_glob_soft_handle,
            sizeof (agp_target_softstate_t), 1);

        if (ret)
                goto err1;

        if ((ret = mod_install(&modlinkage)) != 0) {
                goto err2;
        }

        return (DDI_SUCCESS);
err2:
        ddi_soft_state_fini(&agptarget_glob_soft_handle);
err1:
        return (ret);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
        int ret;

        if ((ret = mod_remove(&modlinkage)) == 0) {
                ddi_soft_state_fini(&agptarget_glob_soft_handle);
        }
        return (ret);
}