1 /* 2 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 /* 6 * Portions Philip Brown phil@bolthole.com Dec 2001 7 */ 8 9 #pragma ident "%Z%%M% %I% %E% SMI" 10 11 /* 12 * agpgart driver 13 * 14 * This driver is primary targeted at providing memory support for INTEL 15 * AGP device, INTEL memory less video card, and AMD64 cpu GART devices. 16 * So there are four main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP, 17 * ARC_AMD64AGP, ARC_AMD64NOAGP to agpgart driver. However, the memory 18 * interfaces are the same for these architectures. The difference is how to 19 * manage the hardware GART table for them. 20 * 21 * For large memory allocation, this driver use direct mapping to userland 22 * application interface to save kernel virtual memory . 23 */ 24 25 #include <sys/types.h> 26 #include <sys/pci.h> 27 #include <sys/systm.h> 28 #include <sys/conf.h> 29 #include <sys/file.h> 30 #include <sys/kstat.h> 31 #include <sys/stat.h> 32 #include <sys/modctl.h> 33 #include <sys/ddi.h> 34 #include <sys/sunddi.h> 35 #include <sys/sunldi.h> 36 #include <sys/policy.h> 37 #include <sys/ddidevmap.h> 38 #include <vm/seg_dev.h> 39 #include <sys/pmem.h> 40 #include <sys/agpgart.h> 41 #include <sys/agp/agpdefs.h> 42 #include <sys/agp/agpgart_impl.h> 43 #include <sys/agp/agpamd64gart_io.h> 44 #include <sys/agp/agpmaster_io.h> 45 #include <sys/agp/agptarget_io.h> 46 47 /* Dynamic debug support */ 48 int agp_debug_var = 0; 49 #define AGPDB_PRINT1(fmt) if (agp_debug_var == 1) cmn_err fmt 50 #define AGPDB_PRINT2(fmt) if (agp_debug_var >= 1) cmn_err fmt 51 52 /* Driver global softstate handle */ 53 static void *agpgart_glob_soft_handle; 54 55 #define MAX_INSTNUM 16 56 57 #define AGP_DEV2INST(devt) (getminor((devt)) >> 4) 58 #define AGP_INST2MINOR(instance) ((instance) << 4) 59 #define IS_INTEL_830(type) ((type) == ARC_IGD830) 60 #define IS_TRUE_AGP(type) (((type) == ARC_INTELAGP) || \ 61 ((type) == ARC_AMD64AGP)) 62 63 #define 
agpinfo_default_to_32(v, v32) \ 64 { \ 65 (v32).agpi32_version = (v).agpi_version; \ 66 (v32).agpi32_devid = (v).agpi_devid; \ 67 (v32).agpi32_mode = (v).agpi_mode; \ 68 (v32).agpi32_aperbase = (v).agpi_aperbase; \ 69 (v32).agpi32_apersize = (v).agpi_apersize; \ 70 (v32).agpi32_pgtotal = (v).agpi_pgtotal; \ 71 (v32).agpi32_pgsystem = (v).agpi_pgsystem; \ 72 (v32).agpi32_pgused = (v).agpi_pgused; \ 73 } 74 75 static ddi_dma_attr_t agpgart_dma_attr = { 76 DMA_ATTR_V0, 77 0U, /* dma_attr_addr_lo */ 78 0xffffffffU, /* dma_attr_addr_hi */ 79 0xffffffffU, /* dma_attr_count_max */ 80 (uint64_t)AGP_PAGE_SIZE, /* dma_attr_align */ 81 1, /* dma_attr_burstsizes */ 82 1, /* dma_attr_minxfer */ 83 0xffffffffU, /* dma_attr_maxxfer */ 84 0xffffffffU, /* dma_attr_seg */ 85 1, /* dma_attr_sgllen, variable */ 86 4, /* dma_attr_granular */ 87 0 /* dma_attr_flags */ 88 }; 89 90 /* 91 * AMD64 supports gart table above 4G. See alloc_gart_table. 92 */ 93 static ddi_dma_attr_t garttable_dma_attr = { 94 DMA_ATTR_V0, 95 0U, /* dma_attr_addr_lo */ 96 0xffffffffU, /* dma_attr_addr_hi */ 97 0xffffffffU, /* dma_attr_count_max */ 98 (uint64_t)AGP_PAGE_SIZE, /* dma_attr_align */ 99 1, /* dma_attr_burstsizes */ 100 1, /* dma_attr_minxfer */ 101 0xffffffffU, /* dma_attr_maxxfer */ 102 0xffffffffU, /* dma_attr_seg */ 103 1, /* dma_attr_sgllen, variable */ 104 4, /* dma_attr_granular */ 105 0 /* dma_attr_flags */ 106 }; 107 108 /* 109 * AGPGART table need a physical contiguous memory. To assure that 110 * each access to gart table is strongly ordered and uncachable, 111 * we use DDI_STRICTORDER_ACC. 112 */ 113 static ddi_device_acc_attr_t gart_dev_acc_attr = { 114 DDI_DEVICE_ATTR_V0, 115 DDI_NEVERSWAP_ACC, 116 DDI_STRICTORDER_ACC /* must be DDI_STRICTORDER_ACC */ 117 }; 118 119 /* 120 * AGP memory is usually used as texture memory or for a framebuffer, so we 121 * can set the memory attribute to write combining. 
Video drivers will 122 * determine the frame buffer attributes, for example the memory is write 123 * combinging or non-cachable. However, the interface between Xorg and agpgart 124 * driver to support attribute selcetion doesn't exist yet. So we set agp memory 125 * to non-cachable by default now. This attribute might be overridden 126 * by MTTR in X86. 127 */ 128 static ddi_device_acc_attr_t mem_dev_acc_attr = { 129 DDI_DEVICE_ATTR_V0, 130 DDI_NEVERSWAP_ACC, 131 DDI_STRICTORDER_ACC /* Can be DDI_MERGING_OK_ACC */ 132 }; 133 134 static keytable_ent_t * 135 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset); 136 static void 137 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts); 138 139 140 static void 141 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate, 142 offset_t off, size_t len, devmap_cookie_t new_handle1, 143 void **new_devprivate1, devmap_cookie_t new_handle2, 144 void **new_devprivate2) 145 { 146 147 struct keytable_ent *mementry; 148 agpgart_softstate_t *softstate; 149 agpgart_ctx_t *ctxp, *newctxp1, *newctxp2; 150 151 ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off)); 152 ASSERT(devprivate); 153 ASSERT(handle); 154 155 ctxp = (agpgart_ctx_t *)devprivate; 156 softstate = ctxp->actx_sc; 157 ASSERT(softstate); 158 159 if (new_handle1 != NULL) { 160 newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 161 newctxp1->actx_sc = softstate; 162 newctxp1->actx_off = ctxp->actx_off; 163 *new_devprivate1 = newctxp1; 164 } 165 166 if (new_handle2 != NULL) { 167 newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 168 newctxp2->actx_sc = softstate; 169 newctxp2->actx_off = off + len; 170 *new_devprivate2 = newctxp2; 171 } 172 173 mutex_enter(&softstate->asoft_instmutex); 174 if ((new_handle1 == NULL) && (new_handle2 == NULL)) { 175 mementry = 176 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off)); 177 ASSERT(mementry); 178 mementry->kte_refcnt--; 179 } else if ((new_handle1 != NULL) && (new_handle2 != NULL)) { 180 mementry = 
181 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off)); 182 ASSERT(mementry); 183 mementry->kte_refcnt++; 184 } 185 ASSERT(mementry->kte_refcnt >= 0); 186 mutex_exit(&softstate->asoft_instmutex); 187 kmem_free(ctxp, sizeof (struct agpgart_ctx)); 188 } 189 190 /*ARGSUSED*/ 191 static int 192 agp_devmap_map(devmap_cookie_t handle, dev_t dev, 193 uint_t flags, offset_t offset, size_t len, void **new_devprivate) 194 { 195 agpgart_softstate_t *softstate; 196 int instance; 197 struct keytable_ent *mementry; 198 agpgart_ctx_t *newctxp; 199 200 ASSERT(handle); 201 instance = AGP_DEV2INST(dev); 202 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 203 if (softstate == NULL) { 204 AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err")); 205 return (ENXIO); 206 } 207 208 ASSERT(softstate); 209 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 210 ASSERT(len); 211 ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len)); 212 213 mementry = 214 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset)); 215 ASSERT(mementry); 216 mementry->kte_refcnt++; 217 ASSERT(mementry->kte_refcnt >= 0); 218 newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 219 newctxp->actx_off = offset; 220 newctxp->actx_sc = softstate; 221 *new_devprivate = newctxp; 222 223 return (0); 224 } 225 226 /*ARGSUSED*/ 227 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate, 228 devmap_cookie_t new_handle, void **new_devprivate) 229 { 230 struct keytable_ent *mementry; 231 agpgart_ctx_t *newctxp, *ctxp; 232 agpgart_softstate_t *softstate; 233 234 ASSERT(devprivate); 235 ASSERT(handle && new_handle); 236 237 ctxp = (agpgart_ctx_t *)devprivate; 238 ASSERT(AGP_ALIGNED(ctxp->actx_off)); 239 240 newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 241 newctxp->actx_off = ctxp->actx_off; 242 newctxp->actx_sc = ctxp->actx_sc; 243 softstate = (agpgart_softstate_t *)newctxp->actx_sc; 244 245 mutex_enter(&softstate->asoft_instmutex); 246 mementry = 
agp_find_bound_keyent(softstate, 247 AGP_BYTES2PAGES(newctxp->actx_off)); 248 mementry->kte_refcnt++; 249 ASSERT(mementry->kte_refcnt >= 0); 250 mutex_exit(&softstate->asoft_instmutex); 251 *new_devprivate = newctxp; 252 253 return (0); 254 } 255 256 struct devmap_callback_ctl agp_devmap_cb = { 257 DEVMAP_OPS_REV, /* rev */ 258 agp_devmap_map, /* map */ 259 NULL, /* access */ 260 agp_devmap_dup, /* dup */ 261 agp_devmap_unmap, /* unmap */ 262 }; 263 264 /* 265 * agp_master_regis_byname() 266 * 267 * Description: 268 * Open the AGP master device node by device path name and 269 * register the device handle for later operations. 270 * We check all possible driver instance from 0 271 * to MAX_INSTNUM because the master device could be 272 * at any instance number. Only one AGP master is supported. 273 * 274 * Arguments: 275 * master_hdlp AGP master device LDI handle pointer 276 * agpgart_l AGPGART driver LDI identifier 277 * 278 * Returns: 279 * -1 failed 280 * 0 success 281 */ 282 static int 283 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li) 284 { 285 int i; 286 char buf[MAXPATHLEN]; 287 288 ASSERT(master_hdlp); 289 ASSERT(agpgart_li); 290 291 /* 292 * Search all possible instance numbers for the agp master device. 293 * Only one master device is supported now, so the search ends 294 * when one master device is found. 295 */ 296 for (i = 0; i < MAX_INSTNUM; i++) { 297 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i); 298 if ((ldi_open_by_name(buf, 0, kcred, 299 master_hdlp, agpgart_li))) 300 continue; 301 AGPDB_PRINT1((CE_NOTE, 302 "master device found: instance number=%d", i)); 303 break; 304 305 } 306 307 /* AGP master device not found */ 308 if (i == MAX_INSTNUM) 309 return (-1); 310 311 return (0); 312 } 313 314 /* 315 * agp_target_regis_byname() 316 * 317 * Description: 318 * This function opens agp bridge device node by 319 * device path name and registers the device handle 320 * for later operations. 
321 * We check driver instance from 0 to MAX_INSTNUM 322 * because the master device could be at any instance 323 * number. Only one agp target is supported. 324 * 325 * 326 * Arguments: 327 * target_hdlp AGP target device LDI handle pointer 328 * agpgart_l AGPGART driver LDI identifier 329 * 330 * Returns: 331 * -1 failed 332 * 0 success 333 */ 334 static int 335 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li) 336 { 337 int i; 338 char buf[MAXPATHLEN]; 339 340 ASSERT(target_hdlp); 341 ASSERT(agpgart_li); 342 343 for (i = 0; i < MAX_INSTNUM; i++) { 344 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i); 345 if ((ldi_open_by_name(buf, 0, kcred, 346 target_hdlp, agpgart_li))) 347 continue; 348 349 AGPDB_PRINT1((CE_NOTE, 350 "bridge device found: instance number=%d", i)); 351 break; 352 353 } 354 355 /* AGP bridge device not found */ 356 if (i == MAX_INSTNUM) { 357 AGPDB_PRINT2((CE_WARN, "bridge device not found")); 358 return (-1); 359 } 360 361 return (0); 362 } 363 364 /* 365 * amd64_gart_regis_byname() 366 * 367 * Description: 368 * Open all amd64 gart device nodes by deice path name and 369 * register the device handles for later operations. Each cpu 370 * has its own amd64 gart device. 371 * 372 * Arguments: 373 * cpu_garts cpu garts device list header 374 * agpgart_l AGPGART driver LDI identifier 375 * 376 * Returns: 377 * -1 failed 378 * 0 success 379 */ 380 static int 381 amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li) 382 { 383 amd64_gart_dev_list_t *gart_list; 384 int i; 385 char buf[MAXPATHLEN]; 386 ldi_handle_t gart_hdl; 387 int ret; 388 389 ASSERT(cpu_garts); 390 ASSERT(agpgart_li); 391 392 /* 393 * Search all possible instance numbers for the gart devices. 394 * There can be multiple on-cpu gart devices for Opteron server. 
395 */ 396 for (i = 0; i < MAX_INSTNUM; i++) { 397 (void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i); 398 ret = ldi_open_by_name(buf, 0, kcred, 399 &gart_hdl, agpgart_li); 400 401 if (ret == ENODEV) 402 continue; 403 else if (ret != 0) { /* There was an error opening the device */ 404 amd64_gart_unregister(cpu_garts); 405 return (ret); 406 } 407 408 AGPDB_PRINT1((CE_NOTE, 409 "amd64 gart device found: instance number=%d", i)); 410 411 gart_list = (amd64_gart_dev_list_t *) 412 kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP); 413 414 /* Add new item to the head of the gart device list */ 415 gart_list->gart_devhdl = gart_hdl; 416 gart_list->next = cpu_garts->gart_dev_list_head; 417 cpu_garts->gart_dev_list_head = gart_list; 418 cpu_garts->gart_device_num++; 419 } 420 421 if (cpu_garts->gart_device_num == 0) 422 return (ENODEV); 423 return (0); 424 } 425 426 /* 427 * Unregister agp master device handle 428 */ 429 static void 430 agp_master_unregister(ldi_handle_t *master_hdlp) 431 { 432 ASSERT(master_hdlp); 433 434 if (master_hdlp) { 435 (void) ldi_close(*master_hdlp, 0, kcred); 436 *master_hdlp = NULL; 437 } 438 } 439 440 /* 441 * Unregister agp bridge device handle 442 */ 443 static void 444 agp_target_unregister(ldi_handle_t *target_hdlp) 445 { 446 if (target_hdlp) { 447 (void) ldi_close(*target_hdlp, 0, kcred); 448 *target_hdlp = NULL; 449 } 450 } 451 452 /* 453 * Unregister all amd64 gart device handles 454 */ 455 static void 456 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts) 457 { 458 amd64_gart_dev_list_t *gart_list; 459 amd64_gart_dev_list_t *next; 460 461 ASSERT(cpu_garts); 462 463 for (gart_list = cpu_garts->gart_dev_list_head; 464 gart_list; gart_list = next) { 465 466 ASSERT(gart_list->gart_devhdl); 467 (void) ldi_close(gart_list->gart_devhdl, 0, kcred); 468 next = gart_list->next; 469 /* Free allocated memory */ 470 kmem_free(gart_list, sizeof (amd64_gart_dev_list_t)); 471 } 472 cpu_garts->gart_dev_list_head = NULL; 473 
cpu_garts->gart_device_num = 0; 474 } 475 476 /* 477 * lyr_detect_master_type() 478 * 479 * Description: 480 * This function gets agp master type by querying agp master device. 481 * 482 * Arguments: 483 * master_hdlp agp master device ldi handle pointer 484 * 485 * Returns: 486 * -1 unsupported device 487 * DEVICE_IS_I810 i810 series 488 * DEVICE_IS_I810 i830 series 489 * DEVICE_IS_AGP true agp master 490 */ 491 static int 492 lyr_detect_master_type(ldi_handle_t *master_hdlp) 493 { 494 int vtype; 495 int err; 496 497 ASSERT(master_hdlp); 498 499 /* ldi_ioctl(agpmaster) */ 500 err = ldi_ioctl(*master_hdlp, DEVICE_DETECT, 501 (intptr_t)&vtype, FKIOCTL, kcred, 0); 502 if (err) /* Unsupported graphics device */ 503 return (-1); 504 return (vtype); 505 } 506 507 /* 508 * devtect_target_type() 509 * 510 * Description: 511 * This function gets the host bridge chipset type by querying the agp 512 * target device. 513 * 514 * Arguments: 515 * target_hdlp agp target device LDI handle pointer 516 * 517 * Returns: 518 * CHIP_IS_INTEL Intel agp chipsets 519 * CHIP_IS_AMD AMD agp chipset 520 * -1 unsupported chipset 521 */ 522 static int 523 lyr_detect_target_type(ldi_handle_t *target_hdlp) 524 { 525 int btype; 526 int err; 527 528 ASSERT(target_hdlp); 529 530 err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype, 531 FKIOCTL, kcred, 0); 532 if (err) /* Unsupported bridge device */ 533 return (-1); 534 return (btype); 535 } 536 537 /* 538 * lyr_init() 539 * 540 * Description: 541 * This function detects the graphics system architecture and 542 * registers all relative device handles in a global structure 543 * "agp_regdev". Then it stores the system arc type in driver 544 * soft state. 545 * 546 * Arguments: 547 * agp_regdev AGP devices registration struct pointer 548 * agpgart_l AGPGART driver LDI identifier 549 * 550 * Returns: 551 * 0 System arc supported and agp devices registration successed. 552 * -1 System arc not supported or device registration failed. 
553 */ 554 int 555 lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li) 556 { 557 ldi_handle_t *master_hdlp; 558 ldi_handle_t *target_hdlp; 559 amd64_garts_dev_t *garts_dev; 560 int card_type, chip_type; 561 int ret; 562 563 ASSERT(agp_regdev); 564 565 bzero(agp_regdev, sizeof (agp_registered_dev_t)); 566 agp_regdev->agprd_arctype = ARC_UNKNOWN; 567 /* 568 * Register agp devices, assuming all instances attached, and 569 * detect which agp architucture this server belongs to. This 570 * must be done before the agpgart driver starts to use layered 571 * driver interfaces. 572 */ 573 master_hdlp = &agp_regdev->agprd_masterhdl; 574 target_hdlp = &agp_regdev->agprd_targethdl; 575 garts_dev = &agp_regdev->agprd_cpugarts; 576 577 /* Check whether the system is amd64 arc */ 578 if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) { 579 /* No amd64 gart devices */ 580 AGPDB_PRINT1((CE_NOTE, 581 "lyr_init: this is not an amd64 system")); 582 if (agp_master_regis_byname(master_hdlp, agpgart_li)) { 583 AGPDB_PRINT2((CE_WARN, 584 "lyr_init: register master device unsuccessful")); 585 goto err1; 586 } 587 if (agp_target_regis_byname(target_hdlp, agpgart_li)) { 588 AGPDB_PRINT2((CE_WARN, 589 "lyr_init: register target device unsuccessful")); 590 goto err2; 591 } 592 card_type = lyr_detect_master_type(master_hdlp); 593 /* 594 * Detect system arc by master device. If it is a intel 595 * integrated device, finish the detection successfully. 
596 */ 597 switch (card_type) { 598 case DEVICE_IS_I810: /* I810 likewise graphics */ 599 AGPDB_PRINT1((CE_NOTE, 600 "lyr_init: the system is Intel 810 arch")); 601 agp_regdev->agprd_arctype = ARC_IGD810; 602 return (0); 603 case DEVICE_IS_I830: /* I830 likewise graphics */ 604 AGPDB_PRINT1((CE_NOTE, 605 "lyr_init: the system is Intel 830 arch")); 606 agp_regdev->agprd_arctype = ARC_IGD830; 607 return (0); 608 case DEVICE_IS_AGP: /* AGP graphics */ 609 break; 610 default: /* Non IGD/AGP graphics */ 611 AGPDB_PRINT2((CE_WARN, 612 "lyr_init: non-supported master device")); 613 goto err3; 614 } 615 616 chip_type = lyr_detect_target_type(target_hdlp); 617 618 /* Continue to detect AGP arc by target device */ 619 switch (chip_type) { 620 case CHIP_IS_INTEL: /* Intel chipset */ 621 AGPDB_PRINT1((CE_NOTE, 622 "lyr_init: Intel AGP arch detected")); 623 agp_regdev->agprd_arctype = ARC_INTELAGP; 624 return (0); 625 case CHIP_IS_AMD: /* AMD chipset */ 626 AGPDB_PRINT2((CE_WARN, 627 "lyr_init: no cpu gart, but have AMD64 chipsets")); 628 goto err3; 629 default: /* Non supported chipset */ 630 AGPDB_PRINT2((CE_WARN, 631 "lyr_init: detection can not continue")); 632 goto err3; 633 } 634 635 } 636 637 if (ret) 638 return (-1); /* Errors in open amd64 cpu gart devices */ 639 640 /* 641 * AMD64 cpu gart device exsits, continue detection 642 */ 643 644 if (agp_master_regis_byname(master_hdlp, agpgart_li)) { 645 AGPDB_PRINT1((CE_NOTE, 646 "lyr_init: register master device unsuccessful")); 647 648 agp_regdev->agprd_arctype = ARC_AMD64NOAGP; 649 AGPDB_PRINT1((CE_NOTE, 650 "lyr_init: no AGP master, but supports IOMMU in amd64")); 651 return (0); /* Finished successfully */ 652 } 653 654 if (agp_target_regis_byname(target_hdlp, agpgart_li)) { 655 AGPDB_PRINT1((CE_NOTE, 656 "lyr_init: register target device unsuccessful")); 657 658 agp_regdev->agprd_arctype = ARC_AMD64NOAGP; 659 660 AGPDB_PRINT1((CE_NOTE, 661 "lyr_init: no AGP bridge, but supports IOMMU in amd64")); 662 663 
agp_master_unregister(&agp_regdev->agprd_masterhdl); 664 return (0); /* Finished successfully */ 665 666 } 667 668 669 AGPDB_PRINT1((CE_NOTE, 670 "lyr_init: the system is AMD64 AGP architecture")); 671 672 agp_regdev->agprd_arctype = ARC_AMD64AGP; 673 674 return (0); /* Finished successfully */ 675 676 err3: 677 agp_target_unregister(&agp_regdev->agprd_targethdl); 678 err2: 679 agp_master_unregister(&agp_regdev->agprd_masterhdl); 680 err1: 681 agp_regdev->agprd_arctype = ARC_UNKNOWN; 682 return (-1); 683 } 684 685 void 686 lyr_end(agp_registered_dev_t *agp_regdev) 687 { 688 ASSERT(agp_regdev); 689 690 switch (agp_regdev->agprd_arctype) { 691 case ARC_IGD810: 692 case ARC_IGD830: 693 case ARC_INTELAGP: 694 agp_master_unregister(&agp_regdev->agprd_masterhdl); 695 agp_target_unregister(&agp_regdev->agprd_targethdl); 696 697 return; 698 case ARC_AMD64AGP: 699 agp_master_unregister(&agp_regdev->agprd_masterhdl); 700 agp_target_unregister(&agp_regdev->agprd_targethdl); 701 amd64_gart_unregister(&agp_regdev->agprd_cpugarts); 702 703 return; 704 case ARC_AMD64NOAGP: 705 amd64_gart_unregister(&agp_regdev->agprd_cpugarts); 706 707 return; 708 default: 709 ASSERT(0); 710 return; 711 } 712 } 713 714 int 715 lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev) 716 { 717 ldi_handle_t hdl; 718 igd_info_t value1; 719 i_agp_info_t value2; 720 amdgart_info_t value3; 721 size_t prealloc_size; 722 int err; 723 amd64_gart_dev_list_t *gart_head; 724 725 ASSERT(info); 726 ASSERT(agp_regdev); 727 728 switch (agp_regdev->agprd_arctype) { 729 case ARC_IGD810: 730 hdl = agp_regdev->agprd_masterhdl; 731 err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1, 732 FKIOCTL, kcred, 0); 733 if (err) 734 return (-1); 735 info->agpki_mdevid = value1.igd_devid; 736 info->agpki_aperbase = value1.igd_aperbase; 737 info->agpki_apersize = value1.igd_apersize; 738 739 hdl = agp_regdev->agprd_targethdl; 740 err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE, 741 (intptr_t)&prealloc_size, FKIOCTL, 
kcred, 0); 742 if (err) 743 return (-1); 744 info->agpki_presize = prealloc_size; 745 746 break; 747 748 case ARC_IGD830: 749 hdl = agp_regdev->agprd_masterhdl; 750 err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1, 751 FKIOCTL, kcred, 0); 752 if (err) 753 return (-1); 754 info->agpki_mdevid = value1.igd_devid; 755 info->agpki_aperbase = value1.igd_aperbase; 756 info->agpki_apersize = value1.igd_apersize; 757 758 hdl = agp_regdev->agprd_targethdl; 759 err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE, 760 (intptr_t)&prealloc_size, FKIOCTL, kcred, 0); 761 if (err) 762 return (-1); 763 764 /* 765 * Assume all units are kilobytes unless explicitly 766 * stated below: 767 * preallocated GTT memory = preallocated memory - GTT size 768 * - scratch page size 769 * 770 * scratch page size = 4 771 * GTT size = aperture size (in MBs) 772 * this algorithm came from Xorg source code 773 */ 774 prealloc_size = prealloc_size - info->agpki_apersize - 4; 775 info->agpki_presize = prealloc_size; 776 break; 777 case ARC_INTELAGP: 778 case ARC_AMD64AGP: 779 /* AGP devices */ 780 hdl = agp_regdev->agprd_masterhdl; 781 err = ldi_ioctl(hdl, AGP_MASTER_GETINFO, 782 (intptr_t)&value2, FKIOCTL, kcred, 0); 783 if (err) 784 return (-1); 785 info->agpki_mdevid = value2.iagp_devid; 786 info->agpki_mver = value2.iagp_ver; 787 info->agpki_mstatus = value2.iagp_mode; 788 hdl = agp_regdev->agprd_targethdl; 789 err = ldi_ioctl(hdl, AGP_TARGET_GETINFO, 790 (intptr_t)&value2, FKIOCTL, kcred, 0); 791 if (err) 792 return (-1); 793 info->agpki_tdevid = value2.iagp_devid; 794 info->agpki_tver = value2.iagp_ver; 795 info->agpki_tstatus = value2.iagp_mode; 796 info->agpki_aperbase = value2.iagp_aperbase; 797 info->agpki_apersize = value2.iagp_apersize; 798 break; 799 case ARC_AMD64NOAGP: 800 /* Meaningful for IOMMU support only */ 801 gart_head = agp_regdev->agprd_cpugarts.gart_dev_list_head; 802 err = ldi_ioctl(gart_head->gart_devhdl, AMD64_GET_INFO, 803 (intptr_t)&value3, FKIOCTL, kcred, 0); 804 if (err) 
805 return (-1); 806 info->agpki_aperbase = value3.cgart_aperbase; 807 info->agpki_apersize = value3.cgart_apersize; 808 break; 809 default: 810 AGPDB_PRINT2((CE_WARN, 811 "lyr_get_info: function doesn't work for unknown arc")); 812 return (-1); 813 } 814 if ((info->agpki_apersize >= MAXAPERMEGAS) || 815 (info->agpki_apersize == 0) || 816 (info->agpki_aperbase == 0)) { 817 AGPDB_PRINT2((CE_WARN, 818 "lyr_get_info: aperture is not programmed correctly!")); 819 return (-1); 820 } 821 822 return (0); 823 } 824 825 /* 826 * lyr_i8xx_add_to_gtt() 827 * 828 * Description: 829 * This function sets up the integrated video device gtt table 830 * via an ioclt to the AGP master driver. 831 * 832 * Arguments: 833 * pg_offset The start entry to be setup 834 * keyent Keytable entity pointer 835 * agp_regdev AGP devices registration struct pointer 836 * 837 * Returns: 838 * 0 success 839 * -1 invalid operations 840 */ 841 int 842 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent, 843 agp_registered_dev_t *agp_regdev) 844 { 845 int err = 0; 846 int rval; 847 ldi_handle_t hdl; 848 igd_gtt_seg_t gttseg; 849 uint32_t *addrp, i; 850 uint32_t npages; 851 852 ASSERT(keyent); 853 ASSERT(agp_regdev); 854 gttseg.igs_pgstart = pg_offset; 855 npages = keyent->kte_pages; 856 gttseg.igs_npage = npages; 857 gttseg.igs_type = keyent->kte_type; 858 gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc 859 (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP); 860 861 addrp = gttseg.igs_phyaddr; 862 for (i = 0; i < npages; i++, addrp++) { 863 *addrp = 864 (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT); 865 } 866 867 hdl = agp_regdev->agprd_masterhdl; 868 if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)>tseg, FKIOCTL, 869 kcred, &rval)) { 870 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error")); 871 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x", 872 gttseg.igs_pgstart)); 873 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x", 874 gttseg.igs_npage)); 875 
AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x", 876 gttseg.igs_type)); 877 err = -1; 878 } 879 kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage); 880 return (err); 881 } 882 883 /* 884 * lyr_i8xx_remove_from_gtt() 885 * 886 * Description: 887 * This function clears the integrated video device gtt table via 888 * an ioctl to the agp master device. 889 * 890 * Arguments: 891 * pg_offset The starting entry to be cleared 892 * npage The number of entries to be cleared 893 * agp_regdev AGP devices struct pointer 894 * 895 * Returns: 896 * 0 success 897 * -1 invalid operations 898 */ 899 int 900 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage, 901 agp_registered_dev_t *agp_regdev) 902 { 903 int rval; 904 ldi_handle_t hdl; 905 igd_gtt_seg_t gttseg; 906 907 gttseg.igs_pgstart = pg_offset; 908 gttseg.igs_npage = npage; 909 910 hdl = agp_regdev->agprd_masterhdl; 911 if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)>tseg, FKIOCTL, 912 kcred, &rval)) 913 return (-1); 914 915 return (0); 916 } 917 918 /* 919 * lyr_set_gart_addr() 920 * 921 * Description: 922 * This function puts the gart table physical address in the 923 * gart base register. 924 * Please refer to gart and gtt table base register format for 925 * gart base register format in agpdefs.h. 
926 * 927 * Arguments: 928 * phy_base The base physical address of gart table 929 * agp_regdev AGP devices registration struct pointer 930 * 931 * Returns: 932 * 0 success 933 * -1 failed 934 * 935 */ 936 937 int 938 lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev) 939 { 940 amd64_gart_dev_list_t *gart_list; 941 ldi_handle_t hdl; 942 int err = 0; 943 944 ASSERT(agp_regdev); 945 switch (agp_regdev->agprd_arctype) { 946 case ARC_IGD810: 947 { 948 uint32_t base; 949 950 ASSERT((phy_base & ~I810_POINTER_MASK) == 0); 951 base = (uint32_t)phy_base; 952 953 hdl = agp_regdev->agprd_masterhdl; 954 err = ldi_ioctl(hdl, I810_SET_GTT_BASE, 955 (intptr_t)&base, FKIOCTL, kcred, 0); 956 break; 957 } 958 case ARC_INTELAGP: 959 { 960 uint32_t addr; 961 addr = (uint32_t)phy_base; 962 963 ASSERT((phy_base & ~GTT_POINTER_MASK) == 0); 964 hdl = agp_regdev->agprd_targethdl; 965 err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR, 966 (intptr_t)&addr, FKIOCTL, kcred, 0); 967 break; 968 } 969 case ARC_AMD64NOAGP: 970 case ARC_AMD64AGP: 971 { 972 uint32_t addr; 973 974 ASSERT((phy_base & ~AMD64_POINTER_MASK) == 0); 975 addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT) 976 & AMD64_GARTBASE_MASK); 977 978 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head; 979 gart_list; 980 gart_list = gart_list->next) { 981 hdl = gart_list->gart_devhdl; 982 if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR, 983 (intptr_t)&addr, FKIOCTL, kcred, 0)) { 984 err = -1; 985 break; 986 } 987 } 988 break; 989 } 990 default: 991 err = -1; 992 } 993 994 if (err) 995 return (-1); 996 997 return (0); 998 } 999 1000 int 1001 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev) 1002 { 1003 ldi_handle_t hdl; 1004 uint32_t command; 1005 1006 ASSERT(agp_regdev); 1007 command = cmd; 1008 hdl = agp_regdev->agprd_targethdl; 1009 if (ldi_ioctl(hdl, AGP_TARGET_SETCMD, 1010 (intptr_t)&command, FKIOCTL, kcred, 0)) 1011 return (-1); 1012 hdl = agp_regdev->agprd_masterhdl; 1013 if (ldi_ioctl(hdl, 
AGP_MASTER_SETCMD, 1014 (intptr_t)&command, FKIOCTL, kcred, 0)) 1015 return (-1); 1016 1017 return (0); 1018 } 1019 1020 int 1021 lyr_config_devices(agp_registered_dev_t *agp_regdev) 1022 { 1023 amd64_gart_dev_list_t *gart_list; 1024 ldi_handle_t hdl; 1025 int rc = 0; 1026 1027 ASSERT(agp_regdev); 1028 switch (agp_regdev->agprd_arctype) { 1029 case ARC_IGD830: 1030 case ARC_IGD810: 1031 break; 1032 case ARC_INTELAGP: 1033 { 1034 hdl = agp_regdev->agprd_targethdl; 1035 rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE, 1036 0, FKIOCTL, kcred, 0); 1037 break; 1038 } 1039 case ARC_AMD64NOAGP: 1040 case ARC_AMD64AGP: 1041 { 1042 /* 1043 * BIOS always shadow registers such like Aperture Base 1044 * register, Aperture Size Register from the AGP bridge 1045 * to the AMD64 CPU host bridge. If future BIOSes are broken 1046 * in this regard, we may need to shadow these registers 1047 * in driver. 1048 */ 1049 1050 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head; 1051 gart_list; 1052 gart_list = gart_list->next) { 1053 hdl = gart_list->gart_devhdl; 1054 if (ldi_ioctl(hdl, AMD64_CONFIGURE, 1055 0, FKIOCTL, kcred, 0)) { 1056 rc = -1; 1057 break; 1058 } 1059 } 1060 break; 1061 } 1062 default: 1063 rc = -1; 1064 } 1065 1066 if (rc) 1067 return (-1); 1068 1069 return (0); 1070 } 1071 1072 int 1073 lyr_unconfig_devices(agp_registered_dev_t *agp_regdev) 1074 { 1075 amd64_gart_dev_list_t *gart_list; 1076 ldi_handle_t hdl; 1077 int rc = 0; 1078 1079 ASSERT(agp_regdev); 1080 switch (agp_regdev->agprd_arctype) { 1081 case ARC_IGD830: 1082 case ARC_IGD810: 1083 { 1084 hdl = agp_regdev->agprd_masterhdl; 1085 rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0); 1086 break; 1087 } 1088 case ARC_INTELAGP: 1089 { 1090 hdl = agp_regdev->agprd_targethdl; 1091 rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG, 1092 0, FKIOCTL, kcred, 0); 1093 break; 1094 } 1095 case ARC_AMD64NOAGP: 1096 case ARC_AMD64AGP: 1097 { 1098 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head; 1099 
gart_list; gart_list = gart_list->next) { 1100 hdl = gart_list->gart_devhdl; 1101 if (ldi_ioctl(hdl, AMD64_UNCONFIG, 1102 0, FKIOCTL, kcred, 0)) { 1103 rc = -1; 1104 break; 1105 } 1106 } 1107 break; 1108 } 1109 default: 1110 rc = -1; 1111 } 1112 1113 if (rc) 1114 return (-1); 1115 1116 return (0); 1117 } 1118 1119 /* 1120 * lyr_flush_gart_cache() 1121 * 1122 * Description: 1123 * This function flushes the GART translation look-aside buffer. All 1124 * GART translation caches will be flushed after this operation. 1125 * 1126 * Arguments: 1127 * agp_regdev AGP devices struct pointer 1128 */ 1129 void 1130 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev) 1131 { 1132 amd64_gart_dev_list_t *gart_list; 1133 ldi_handle_t hdl; 1134 1135 ASSERT(agp_regdev); 1136 if ((agp_regdev->agprd_arctype == ARC_AMD64AGP) || 1137 (agp_regdev->agprd_arctype == ARC_AMD64NOAGP)) { 1138 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head; 1139 gart_list; gart_list = gart_list->next) { 1140 hdl = gart_list->gart_devhdl; 1141 (void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB, 1142 0, FKIOCTL, kcred, 0); 1143 } 1144 } else if (agp_regdev->agprd_arctype == ARC_INTELAGP) { 1145 hdl = agp_regdev->agprd_targethdl; 1146 (void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0, 1147 FKIOCTL, kcred, 0); 1148 } 1149 } 1150 1151 /* 1152 * get_max_pages() 1153 * 1154 * Description: 1155 * This function compute the total pages allowed for agp aperture 1156 * based on the ammount of physical pages. 1157 * The algorithm is: compare the aperture size with 1/4 of total 1158 * physical pages, and use the smaller one to for the max available 1159 * pages. 
1160 * 1161 * Arguments: 1162 * aper_size system agp aperture size (in MB) 1163 * 1164 * Returns: 1165 * The max possible number of agp memory pages available to users 1166 */ 1167 static uint32_t 1168 get_max_pages(uint32_t aper_size) 1169 { 1170 uint32_t i, j; 1171 1172 ASSERT(aper_size <= MAXAPERMEGAS); 1173 1174 i = AGP_MB2PAGES(aper_size); 1175 j = (physmem >> 2); 1176 1177 return ((i < j) ? i : j); 1178 } 1179 1180 /* 1181 * agp_fill_empty_keyent() 1182 * 1183 * Description: 1184 * This function finds a empty key table slot and 1185 * fills it with a new entity. 1186 * 1187 * Arguments: 1188 * softsate driver soft state pointer 1189 * entryp new entity data pointer 1190 * 1191 * Returns: 1192 * NULL no key table slot available 1193 * entryp the new entity slot pointer 1194 */ 1195 static keytable_ent_t * 1196 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp) 1197 { 1198 int key; 1199 keytable_ent_t *newentryp; 1200 1201 ASSERT(softstate); 1202 ASSERT(entryp); 1203 ASSERT(entryp->kte_memhdl); 1204 ASSERT(entryp->kte_pfnarray); 1205 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 1206 1207 for (key = 0; key < AGP_MAXKEYS; key++) { 1208 newentryp = &softstate->asoft_table[key]; 1209 if (newentryp->kte_memhdl == NULL) { 1210 break; 1211 } 1212 } 1213 1214 if (key >= AGP_MAXKEYS) { 1215 AGPDB_PRINT2((CE_WARN, 1216 "agp_fill_empty_keyent: key table exhausted")); 1217 return (NULL); 1218 } 1219 1220 ASSERT(newentryp->kte_pfnarray == NULL); 1221 bcopy(entryp, newentryp, sizeof (keytable_ent_t)); 1222 newentryp->kte_key = key; 1223 1224 return (newentryp); 1225 } 1226 1227 /* 1228 * agp_find_bound_keyent() 1229 * 1230 * Description: 1231 * This function finds the key table entity by agp aperture page offset. 1232 * Every keytable entity will have an agp aperture range after the binding 1233 * operation. 
1234 * 1235 * Arguments: 1236 * softsate driver soft state pointer 1237 * pg_offset agp aperture page offset 1238 * 1239 * Returns: 1240 * NULL no such keytable entity 1241 * pointer key table entity pointer found 1242 */ 1243 static keytable_ent_t * 1244 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset) 1245 { 1246 int keycount; 1247 keytable_ent_t *entryp; 1248 1249 ASSERT(softstate); 1250 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 1251 1252 for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) { 1253 entryp = &softstate->asoft_table[keycount]; 1254 if (entryp->kte_bound == 0) { 1255 continue; 1256 } 1257 1258 if (pg_offset < entryp->kte_pgoff) 1259 continue; 1260 if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages)) 1261 continue; 1262 1263 ASSERT(entryp->kte_memhdl); 1264 ASSERT(entryp->kte_pfnarray); 1265 1266 return (entryp); 1267 } 1268 1269 return (NULL); 1270 } 1271 1272 /* 1273 * agp_check_off() 1274 * 1275 * Description: 1276 * This function checks whether an AGP aperture range to be bound 1277 * overlaps with AGP offset already bound. 
1278 * 1279 * Arguments: 1280 * entryp key table start entry pointer 1281 * pg_start AGP range start page offset 1282 * pg_num pages number to be bound 1283 * 1284 * Returns: 1285 * 0 Does not overlap 1286 * -1 Overlaps 1287 */ 1288 1289 static int 1290 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num) 1291 { 1292 int key; 1293 uint64_t pg_end; 1294 uint64_t kpg_end; 1295 1296 ASSERT(entryp); 1297 1298 pg_end = pg_start + pg_num; 1299 for (key = 0; key < AGP_MAXKEYS; key++) { 1300 if (!entryp[key].kte_bound) 1301 continue; 1302 1303 kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages; 1304 if (!((pg_end <= entryp[key].kte_pgoff) || 1305 (pg_start >= kpg_end))) 1306 break; 1307 } 1308 1309 if (key == AGP_MAXKEYS) 1310 return (0); 1311 else 1312 return (-1); 1313 } 1314 1315 static int 1316 is_controlling_proc(agpgart_softstate_t *st) 1317 { 1318 ASSERT(st); 1319 1320 if (!st->asoft_acquired) { 1321 AGPDB_PRINT2((CE_WARN, 1322 "ioctl_agpgart_setup: gart not acquired")); 1323 return (-1); 1324 } 1325 if (st->asoft_curpid != ddi_get_pid()) { 1326 AGPDB_PRINT2((CE_WARN, 1327 "ioctl_agpgart_release: not controlling process")); 1328 return (-1); 1329 } 1330 1331 return (0); 1332 } 1333 1334 static void release_control(agpgart_softstate_t *st) 1335 { 1336 st->asoft_curpid = 0; 1337 st->asoft_acquired = 0; 1338 } 1339 1340 static void acquire_control(agpgart_softstate_t *st) 1341 { 1342 st->asoft_curpid = ddi_get_pid(); 1343 st->asoft_acquired = 1; 1344 } 1345 1346 /* 1347 * agp_remove_from_gart() 1348 * 1349 * Description: 1350 * This function fills the gart table entries by a given page 1351 * frame number array and setup the agp aperture page to physical 1352 * memory page translation. 
1353 * Arguments: 1354 * pg_offset Starting aperture page to be bound 1355 * entries the number of pages to be bound 1356 * acc_hdl GART table dma memory acc handle 1357 * tablep GART table kernel virtual address 1358 */ 1359 static void 1360 agp_remove_from_gart( 1361 uint32_t pg_offset, 1362 uint32_t entries, 1363 ddi_dma_handle_t dma_hdl, 1364 uint32_t *tablep) 1365 { 1366 uint32_t items = 0; 1367 uint32_t *entryp; 1368 1369 entryp = tablep + pg_offset; 1370 while (items < entries) { 1371 *(entryp + items) = 0; 1372 items++; 1373 } 1374 (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t), 1375 entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV); 1376 } 1377 1378 /* 1379 * agp_unbind_key() 1380 * 1381 * Description: 1382 * This function unbinds AGP memory from the gart table. It will clear 1383 * all the gart entries related to this agp memory. 1384 * 1385 * Arguments: 1386 * softstate driver soft state pointer 1387 * entryp key table entity pointer 1388 * 1389 * Returns: 1390 * EINVAL invalid key table entity pointer 1391 * 0 success 1392 * 1393 */ 1394 static int 1395 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp) 1396 { 1397 int retval = 0; 1398 1399 ASSERT(entryp); 1400 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 1401 1402 if (!entryp->kte_bound) { 1403 AGPDB_PRINT2((CE_WARN, 1404 "agp_unbind_key: key = 0x%x, not bound", 1405 entryp->kte_key)); 1406 return (EINVAL); 1407 } 1408 if (entryp->kte_refcnt) { 1409 AGPDB_PRINT2((CE_WARN, 1410 "agp_unbind_key: memory is exported to users")); 1411 return (EINVAL); 1412 } 1413 1414 ASSERT((entryp->kte_pgoff + entryp->kte_pages) <= 1415 AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)); 1416 ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN)); 1417 1418 switch (softstate->asoft_devreg.agprd_arctype) { 1419 case ARC_IGD810: 1420 case ARC_IGD830: 1421 retval = lyr_i8xx_remove_from_gtt( 1422 entryp->kte_pgoff, entryp->kte_pages, 1423 
&softstate->asoft_devreg); 1424 if (retval) { 1425 AGPDB_PRINT2((CE_WARN, 1426 "agp_unbind_key: Key = 0x%x, clear table error", 1427 entryp->kte_key)); 1428 return (EIO); 1429 } 1430 break; 1431 case ARC_INTELAGP: 1432 case ARC_AMD64NOAGP: 1433 case ARC_AMD64AGP: 1434 agp_remove_from_gart(entryp->kte_pgoff, 1435 entryp->kte_pages, 1436 softstate->gart_dma_handle, 1437 (uint32_t *)softstate->gart_vbase); 1438 /* Flush GTLB table */ 1439 lyr_flush_gart_cache(&softstate->asoft_devreg); 1440 1441 break; 1442 } 1443 1444 entryp->kte_bound = 0; 1445 1446 return (0); 1447 } 1448 1449 /* 1450 * agp_dealloc_kmem() 1451 * 1452 * Description: 1453 * This function deallocates dma memory resources for userland 1454 * applications. 1455 * 1456 * Arguments: 1457 * entryp keytable entity pointer 1458 */ 1459 static void 1460 agp_dealloc_kmem(keytable_ent_t *entryp) 1461 { 1462 kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages); 1463 entryp->kte_pfnarray = NULL; 1464 1465 (void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle); 1466 KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0; 1467 ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl); 1468 KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL; 1469 KMEMP(entryp->kte_memhdl)->kmem_reallen = 0; 1470 KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL; 1471 1472 ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle)); 1473 KMEMP(entryp->kte_memhdl)->kmem_handle = NULL; 1474 1475 kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t)); 1476 entryp->kte_memhdl = NULL; 1477 } 1478 1479 /* 1480 * agp_dealloc_pmem() 1481 * 1482 * Description: 1483 * This function deallocates memory resource for direct mapping to 1484 * userland applications. 
1485 * 1486 * Arguments: 1487 * entryp key table entity pointer 1488 * 1489 */ 1490 static void 1491 agp_dealloc_pmem(keytable_ent_t *entryp) 1492 { 1493 devmap_pmem_free(PMEMP(entryp->kte_memhdl)->pmem_cookie); 1494 PMEMP(entryp->kte_memhdl)->pmem_cookie = NULL; 1495 kmem_free(entryp->kte_memhdl, sizeof (agp_pmem_handle_t)); 1496 entryp->kte_memhdl = NULL; 1497 1498 /* free the page frame number array */ 1499 kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages); 1500 entryp->kte_pfnarray = NULL; 1501 } 1502 1503 /* 1504 * agp_dealloc_mem() 1505 * 1506 * Description: 1507 * This function deallocates physical memory resources allocated for 1508 * userland applications. 1509 * 1510 * Arguments: 1511 * st driver soft state pointer 1512 * entryp key table entity pointer 1513 * 1514 * Returns: 1515 * -1 not a valid memory type or the memory is mapped by 1516 * user area applications 1517 * 0 success 1518 */ 1519 static int 1520 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t *entryp) 1521 { 1522 1523 ASSERT(entryp); 1524 ASSERT(st); 1525 ASSERT(entryp->kte_memhdl); 1526 ASSERT(mutex_owned(&st->asoft_instmutex)); 1527 1528 /* auto unbind here */ 1529 if (entryp->kte_bound && !entryp->kte_refcnt) { 1530 AGPDB_PRINT2((CE_WARN, 1531 "agp_dealloc_mem: key=0x%x, auto unbind", 1532 entryp->kte_key)); 1533 1534 /* 1535 * agp_dealloc_mem may be called indirectly by agp_detach. 1536 * In the agp_detach function, agpgart_close is already 1537 * called which will free the gart table. agp_unbind_key 1538 * will panic if no valid gart table exists. So test if 1539 * gart table exsits here. 
1540 */ 1541 if (st->asoft_opened) 1542 (void) agp_unbind_key(st, entryp); 1543 } 1544 if (entryp->kte_refcnt) { 1545 AGPDB_PRINT2((CE_WARN, 1546 "agp_dealloc_pmem: memory is exported to users")); 1547 return (-1); 1548 } 1549 1550 switch (entryp->kte_type) { 1551 case AGP_NORMAL: 1552 agp_dealloc_pmem(entryp); 1553 break; 1554 case AGP_PHYSICAL: 1555 agp_dealloc_kmem(entryp); 1556 break; 1557 default: 1558 return (-1); 1559 } 1560 1561 return (0); 1562 } 1563 1564 /* 1565 * agp_del_allkeys() 1566 * 1567 * Description: 1568 * This function calls agp_dealloc_mem to release all the agp memory 1569 * resource allocated. 1570 * 1571 * Arguments: 1572 * softsate driver soft state pointer 1573 * Returns: 1574 * -1 can not free all agp memory 1575 * 0 success 1576 * 1577 */ 1578 static int 1579 agp_del_allkeys(agpgart_softstate_t *softstate) 1580 { 1581 int key; 1582 int ret = 0; 1583 1584 ASSERT(softstate); 1585 for (key = 0; key < AGP_MAXKEYS; key++) { 1586 if (softstate->asoft_table[key].kte_memhdl != NULL) { 1587 /* 1588 * Check if we can free agp memory now. 1589 * If agp memory is exported to user 1590 * applications, agp_dealloc_mem will fail. 1591 */ 1592 if (agp_dealloc_mem(softstate, 1593 &softstate->asoft_table[key])) 1594 ret = -1; 1595 } 1596 } 1597 1598 return (ret); 1599 } 1600 1601 /* 1602 * pfn2gartentry() 1603 * 1604 * Description: 1605 * This function converts a physical address to GART entry. 1606 * For AMD64, hardware only support addresses below 40bits, 1607 * about 1024G physical address, so the largest pfn 1608 * number is below 28 bits. Please refer to GART and GTT entry 1609 * format table in agpdefs.h for entry format. Intel IGD only 1610 * only supports GTT entry below 1G. Intel AGP only supports 1611 * GART entry below 4G. 
1612 * 1613 * Arguments: 1614 * arc_type system agp arc type 1615 * pfn page frame number 1616 * itemv the entry item to be returned 1617 * Returns: 1618 * -1 not a invalid page frame 1619 * 0 conversion success 1620 */ 1621 static int 1622 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv) 1623 { 1624 uint64_t paddr; 1625 1626 paddr = pfn<<AGP_PAGE_SHIFT; 1627 1628 switch (arc_type) { 1629 case ARC_INTELAGP: 1630 { 1631 /* Only support 32-bit hardware address */ 1632 if ((paddr & ~AGP_INTEL_POINTER_MASK) != 0) { 1633 AGPDB_PRINT2((CE_WARN, 1634 "INTEL AGP Hardware only support 32 bits")); 1635 return (-1); 1636 } 1637 *itemv = (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID; 1638 1639 break; 1640 } 1641 case ARC_AMD64NOAGP: 1642 case ARC_AMD64AGP: 1643 { 1644 uint32_t value1, value2; 1645 /* Physaddr should not exceed 40-bit */ 1646 if ((paddr & ~AMD64_POINTER_MASK) != 0) { 1647 AGPDB_PRINT2((CE_WARN, 1648 "AMD64 GART hardware only supoort 40 bits")); 1649 return (-1); 1650 } 1651 value1 = (uint32_t)pfn >> 20; 1652 value1 <<= 4; 1653 value2 = (uint32_t)pfn << 12; 1654 1655 *itemv = value1 | value2 | AMD64_ENTRY_VALID; 1656 break; 1657 } 1658 case ARC_IGD810: 1659 if ((paddr & ~I810_POINTER_MASK) != 0) { 1660 AGPDB_PRINT2((CE_WARN, 1661 "Intel i810 only support 30 bits")); 1662 return (-1); 1663 } 1664 break; 1665 1666 case ARC_IGD830: 1667 if ((paddr & ~GTT_POINTER_MASK) != 0) { 1668 AGPDB_PRINT2((CE_WARN, 1669 "Intel IGD only support 32 bits")); 1670 return (-1); 1671 } 1672 break; 1673 default: 1674 AGPDB_PRINT2((CE_WARN, 1675 "pfn2gartentry: arc type = %d, not support", arc_type)); 1676 return (-1); 1677 } 1678 return (0); 1679 } 1680 1681 /* 1682 * Check allocated physical pages validity, only called in DEBUG 1683 * mode. 
1684 */ 1685 static int 1686 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items) 1687 { 1688 int count; 1689 uint32_t ret; 1690 1691 for (count = 0; count < items; count++) { 1692 if (pfn2gartentry(arc_type, pfnarray[count], &ret)) 1693 break; 1694 } 1695 if (count < items) 1696 return (-1); 1697 else 1698 return (0); 1699 } 1700 1701 /* 1702 * kmem_getpfns() 1703 * 1704 * Description: 1705 * This function gets page frame numbers from dma handle. 1706 * 1707 * Arguments: 1708 * dma_handle dma hanle allocated by ddi_dma_alloc_handle 1709 * dma_cookip dma cookie pointer 1710 * cookies_num cookies number 1711 * pfnarray array to store page frames 1712 * 1713 * Returns: 1714 * 0 success 1715 */ 1716 static int 1717 kmem_getpfns( 1718 ddi_dma_handle_t dma_handle, 1719 ddi_dma_cookie_t *dma_cookiep, 1720 int cookies_num, 1721 pfn_t *pfnarray) 1722 { 1723 int num_cookies; 1724 int index = 0; 1725 1726 num_cookies = cookies_num; 1727 1728 while (num_cookies > 0) { 1729 uint64_t ck_startaddr, ck_length, ck_end; 1730 ck_startaddr = dma_cookiep->dmac_address; 1731 ck_length = dma_cookiep->dmac_size; 1732 1733 ck_end = ck_startaddr + ck_length; 1734 while (ck_startaddr < ck_end) { 1735 pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT; 1736 ck_startaddr += AGP_PAGE_SIZE; 1737 index++; 1738 } 1739 1740 num_cookies--; 1741 if (num_cookies > 0) { 1742 ddi_dma_nextcookie(dma_handle, dma_cookiep); 1743 } 1744 } 1745 1746 return (0); 1747 } 1748 1749 static int 1750 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info) 1751 { 1752 switch (softstate->asoft_devreg.agprd_arctype) { 1753 case ARC_IGD810: 1754 case ARC_IGD830: 1755 info->agpi_version.agpv_major = 0; 1756 info->agpi_version.agpv_minor = 0; 1757 info->agpi_devid = softstate->asoft_info.agpki_mdevid; 1758 info->agpi_mode = 0; 1759 break; 1760 case ARC_INTELAGP: 1761 case ARC_AMD64AGP: 1762 info->agpi_version = softstate->asoft_info.agpki_tver; 1763 info->agpi_devid = 
softstate->asoft_info.agpki_tdevid; 1764 info->agpi_mode = softstate->asoft_info.agpki_tstatus; 1765 break; 1766 case ARC_AMD64NOAGP: 1767 break; 1768 default: 1769 AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC")); 1770 return (-1); 1771 } 1772 /* 1773 * 64bit->32bit conversion possible 1774 */ 1775 info->agpi_aperbase = softstate->asoft_info.agpki_aperbase; 1776 info->agpi_apersize = softstate->asoft_info.agpki_apersize; 1777 info->agpi_pgtotal = softstate->asoft_pgtotal; 1778 info->agpi_pgsystem = info->agpi_pgtotal; 1779 info->agpi_pgused = softstate->asoft_pgused; 1780 1781 return (0); 1782 } 1783 1784 static uint32_t 1785 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode) 1786 { 1787 uint32_t cmd; 1788 int rq, sba, over4g, fw, rate; 1789 1790 /* 1791 * tstatus: target device status 1792 * mstatus: master device status 1793 * mode: the agp mode to be sent 1794 */ 1795 1796 /* 1797 * RQ - Request Queue size 1798 * set RQ to the min of mode and tstatus 1799 * if mode set a RQ larger than hardware can support, 1800 * use the max RQ which hardware can support. 1801 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support 1802 * Corelogic will enqueue agp transaction 1803 */ 1804 rq = mode & AGPSTAT_RQ_MASK; 1805 if ((tstatus & AGPSTAT_RQ_MASK) < rq) 1806 rq = tstatus & AGPSTAT_RQ_MASK; 1807 1808 /* 1809 * SBA - Sideband Addressing 1810 * 1811 * Sideband Addressing provides an additional bus to pass requests 1812 * (address and command) to the target from the master. 1813 * 1814 * set SBA if all three support it 1815 */ 1816 sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA) 1817 & (mode & AGPSTAT_SBA); 1818 1819 /* set OVER4G if all three support it */ 1820 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G) 1821 & (mode & AGPSTAT_OVER4G); 1822 1823 /* 1824 * FW - fast write 1825 * 1826 * acceleration of memory write transactions from the corelogic to the 1827 * A.G.P. master device acting like a PCI target. 
1828 * 1829 * set FW if all three support it 1830 */ 1831 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW) 1832 & (mode & AGPSTAT_FW); 1833 1834 /* 1835 * figure out the max rate 1836 * AGP v2 support: 4X, 2X, 1X speed 1837 * status bit meaning 1838 * --------------------------------------------- 1839 * 7:3 others 1840 * 3 0 stand for V2 support 1841 * 0:2 001:1X, 010:2X, 100:4X 1842 * ---------------------------------------------- 1843 */ 1844 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK) 1845 & (mode & AGPSTAT_RATE_MASK); 1846 if (rate & AGP2_RATE_4X) 1847 rate = AGP2_RATE_4X; 1848 else if (rate & AGP2_RATE_2X) 1849 rate = AGP2_RATE_2X; 1850 else 1851 rate = AGP2_RATE_1X; 1852 1853 cmd = rq | sba | over4g | fw | rate; 1854 /* enable agp mode */ 1855 cmd |= AGPCMD_AGPEN; 1856 1857 return (cmd); 1858 } 1859 1860 static uint32_t 1861 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode) 1862 { 1863 uint32_t cmd = 0; 1864 uint32_t rq, arqsz, cal, sba, over4g, fw, rate; 1865 1866 /* 1867 * tstatus: target device status 1868 * mstatus: master device status 1869 * mode: the agp mode to be set 1870 */ 1871 1872 /* 1873 * RQ - Request Queue size 1874 * Set RQ to the min of mode and tstatus 1875 * If mode set a RQ larger than hardware can support, 1876 * use the max RQ which hardware can support. 1877 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support 1878 * Corelogic will enqueue agp transaction; 1879 */ 1880 rq = mode & AGPSTAT_RQ_MASK; 1881 if ((tstatus & AGPSTAT_RQ_MASK) < rq) 1882 rq = tstatus & AGPSTAT_RQ_MASK; 1883 1884 /* 1885 * ARQSZ - Asynchronous Request Queue size 1886 * Set the value equal to tstatus. 
1887 * Don't allow the mode register to override values 1888 */ 1889 arqsz = tstatus & AGPSTAT_ARQSZ_MASK; 1890 1891 /* 1892 * CAL - Calibration cycle 1893 * Set to the min of tstatus and mstatus 1894 * Don't allow override by mode register 1895 */ 1896 cal = tstatus & AGPSTAT_CAL_MASK; 1897 if ((mstatus & AGPSTAT_CAL_MASK) < cal) 1898 cal = mstatus & AGPSTAT_CAL_MASK; 1899 1900 /* 1901 * SBA - Sideband Addressing 1902 * 1903 * Sideband Addressing provides an additional bus to pass requests 1904 * (address and command) to the target from the master. 1905 * 1906 * SBA in agp v3.0 must be set 1907 */ 1908 sba = AGPCMD_SBAEN; 1909 1910 /* GART64B is not set since no hardware supports it now */ 1911 1912 /* Set OVER4G if all three support it */ 1913 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G) 1914 & (mode & AGPSTAT_OVER4G); 1915 1916 /* 1917 * FW - fast write 1918 * 1919 * Acceleration of memory write transactions from the corelogic to the 1920 * A.G.P. master device acting like a PCI target. 
1921 * 1922 * Always set FW in AGP 3.0 1923 */ 1924 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW) 1925 & (mode & AGPSTAT_FW); 1926 1927 /* 1928 * Figure out the max rate 1929 * 1930 * AGP v3 support: 8X, 4X speed 1931 * 1932 * status bit meaning 1933 * --------------------------------------------- 1934 * 7:3 others 1935 * 3 1 stand for V3 support 1936 * 0:2 001:4X, 010:8X, 011:4X,8X 1937 * ---------------------------------------------- 1938 */ 1939 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK) 1940 & (mode & AGPSTAT_RATE_MASK); 1941 if (rate & AGP3_RATE_8X) 1942 rate = AGP3_RATE_8X; 1943 else 1944 rate = AGP3_RATE_4X; 1945 1946 cmd = rq | arqsz | cal | sba | over4g | fw | rate; 1947 /* Enable AGP mode */ 1948 cmd |= AGPCMD_AGPEN; 1949 1950 return (cmd); 1951 } 1952 1953 static int 1954 agp_setup(agpgart_softstate_t *softstate, uint32_t mode) 1955 { 1956 uint32_t tstatus, mstatus; 1957 uint32_t agp_mode; 1958 1959 tstatus = softstate->asoft_info.agpki_tstatus; 1960 mstatus = softstate->asoft_info.agpki_mstatus; 1961 1962 /* 1963 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0 1964 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we 1965 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in 1966 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register, 1967 * we can get which mode it is working at. The working mode of 1968 * AGP master and AGP target must be consistent. That is, both 1969 * of them must work on AGP 3.0 mode or AGP 2.0 mode. 
1970 */ 1971 if ((softstate->asoft_info.agpki_tver.agpv_major == 3) && 1972 (tstatus & AGPSTAT_MODE3)) { 1973 /* Master device should be 3.0 mode, too */ 1974 if ((softstate->asoft_info.agpki_mver.agpv_major != 3) || 1975 ((mstatus & AGPSTAT_MODE3) == 0)) 1976 return (EIO); 1977 1978 agp_mode = agp_v3_setup(tstatus, mstatus, mode); 1979 /* Write to the AGPCMD register of target and master devices */ 1980 if (lyr_set_agp_cmd(agp_mode, 1981 &softstate->asoft_devreg)) 1982 return (EIO); 1983 1984 softstate->asoft_mode = agp_mode; 1985 1986 return (0); 1987 } 1988 1989 /* 1990 * If agp taget device doesn't work in AGP 3.0 mode, 1991 * it must work in AGP 2.0 mode. And make sure 1992 * master device work in AGP 2.0 mode too 1993 */ 1994 if ((softstate->asoft_info.agpki_mver.agpv_major == 3) && 1995 (mstatus & AGPSTAT_MODE3)) 1996 return (EIO); 1997 1998 agp_mode = agp_v2_setup(tstatus, mstatus, mode); 1999 if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg)) 2000 return (EIO); 2001 softstate->asoft_mode = agp_mode; 2002 2003 return (0); 2004 } 2005 2006 /* 2007 * agp_alloc_pmem() 2008 * 2009 * Description: 2010 * This function allocates physical memory for direct mapping to userland 2011 * applications. 
2012 * 2013 * Arguments: 2014 * softsate driver soft state pointer 2015 * length memory size 2016 * type AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical 2017 * memory type for intel i810 IGD 2018 * 2019 * Returns: 2020 * entryp new key table entity pointer 2021 * NULL no key table slot available 2022 */ 2023 static keytable_ent_t * 2024 agp_alloc_pmem(agpgart_softstate_t *softstate, size_t length, int type) 2025 { 2026 keytable_ent_t keyentry; 2027 keytable_ent_t *entryp; 2028 2029 ASSERT(AGP_ALIGNED(length)); 2030 bzero(&keyentry, sizeof (keytable_ent_t)); 2031 2032 keyentry.kte_pages = AGP_BYTES2PAGES(length); 2033 keyentry.kte_type = type; 2034 2035 keyentry.kte_memhdl = 2036 (agp_pmem_handle_t *)kmem_zalloc(sizeof (agp_pmem_handle_t), 2037 KM_SLEEP); 2038 2039 if (devmap_pmem_alloc(length, 2040 PMEM_SLEEP, 2041 &PMEMP(keyentry.kte_memhdl)->pmem_cookie) != DDI_SUCCESS) 2042 goto err1; 2043 2044 keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) * 2045 keyentry.kte_pages, KM_SLEEP); 2046 2047 if (devmap_pmem_getpfns( 2048 PMEMP(keyentry.kte_memhdl)->pmem_cookie, 2049 0, keyentry.kte_pages, keyentry.kte_pfnarray) != DDI_SUCCESS) { 2050 AGPDB_PRINT2((CE_WARN, 2051 "agp_alloc_pmem: devmap_map_getpfns failed")); 2052 goto err2; 2053 } 2054 ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype, 2055 keyentry.kte_pfnarray, keyentry.kte_pages)); 2056 entryp = agp_fill_empty_keyent(softstate, &keyentry); 2057 2058 if (!entryp) { 2059 AGPDB_PRINT2((CE_WARN, 2060 "agp_alloc_pmem: agp_fill_empty_keyent error")); 2061 goto err2; 2062 } 2063 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 2064 2065 return (entryp); 2066 2067 err2: 2068 kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages); 2069 keyentry.kte_pfnarray = NULL; 2070 devmap_pmem_free(PMEMP(keyentry.kte_memhdl)->pmem_cookie); 2071 PMEMP(keyentry.kte_memhdl)->pmem_cookie = NULL; 2072 err1: 2073 kmem_free(keyentry.kte_memhdl, sizeof (agp_pmem_handle_t)); 2074 
keyentry.kte_memhdl = NULL; 2075 2076 return (NULL); 2077 2078 } 2079 2080 /* 2081 * agp_alloc_kmem() 2082 * 2083 * Description: 2084 * This function allocates physical memory for userland applications 2085 * by ddi interfaces. This function can only be called to allocate 2086 * small phsyical contiguous pages, usually tens of kilobytes. 2087 * 2088 * Arguments: 2089 * softsate driver soft state pointer 2090 * length memory size 2091 * 2092 * Returns: 2093 * entryp new keytable entity pointer 2094 * NULL no keytable slot available or no physical 2095 * memory available 2096 */ 2097 static keytable_ent_t * 2098 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length) 2099 { 2100 keytable_ent_t keyentry; 2101 keytable_ent_t *entryp; 2102 int ret; 2103 2104 ASSERT(AGP_ALIGNED(length)); 2105 2106 bzero(&keyentry, sizeof (keytable_ent_t)); 2107 2108 keyentry.kte_pages = AGP_BYTES2PAGES(length); 2109 keyentry.kte_type = AGP_PHYSICAL; 2110 2111 /* 2112 * Set dma_attr_sgllen to assure contiguous physical pages 2113 */ 2114 agpgart_dma_attr.dma_attr_sgllen = 1; 2115 2116 /* 4k size pages */ 2117 keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP); 2118 2119 if (ddi_dma_alloc_handle(softstate->asoft_dip, 2120 &agpgart_dma_attr, 2121 DDI_DMA_SLEEP, NULL, 2122 &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) { 2123 AGPDB_PRINT2((CE_WARN, 2124 "agp_alloc_kmem: ddi_dma_allco_hanlde error")); 2125 goto err4; 2126 } 2127 2128 if ((ret = ddi_dma_mem_alloc( 2129 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2130 length, 2131 &gart_dev_acc_attr, 2132 DDI_DMA_CONSISTENT, 2133 DDI_DMA_SLEEP, NULL, 2134 &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr, 2135 &KMEMP(keyentry.kte_memhdl)->kmem_reallen, 2136 &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) { 2137 AGPDB_PRINT2((CE_WARN, 2138 "agp_alloc_kmem: ddi_dma_mem_alloc error")); 2139 2140 goto err3; 2141 } 2142 2143 ret = ddi_dma_addr_bind_handle( 2144 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2145 NULL, 2146 
KMEMP(keyentry.kte_memhdl)->kmem_kvaddr, 2147 length, 2148 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2149 DDI_DMA_SLEEP, 2150 NULL, 2151 &KMEMP(keyentry.kte_memhdl)->kmem_dcookie, 2152 &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num); 2153 2154 /* 2155 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more 2156 * than one cookie, we check this in the if statement. 2157 */ 2158 2159 if ((ret != DDI_DMA_MAPPED) || 2160 (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1)) { 2161 AGPDB_PRINT2((CE_WARN, 2162 "agp_alloc_kmem: can not alloc physical memory properly")); 2163 goto err2; 2164 } 2165 2166 keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) * 2167 keyentry.kte_pages, KM_SLEEP); 2168 2169 if (kmem_getpfns( 2170 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2171 &KMEMP(keyentry.kte_memhdl)->kmem_dcookie, 2172 KMEMP(keyentry.kte_memhdl)->kmem_cookies_num, 2173 keyentry.kte_pfnarray)) { 2174 AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error")); 2175 goto err1; 2176 } 2177 2178 ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype, 2179 keyentry.kte_pfnarray, keyentry.kte_pages)); 2180 entryp = agp_fill_empty_keyent(softstate, &keyentry); 2181 if (!entryp) { 2182 AGPDB_PRINT2((CE_WARN, 2183 "agp_alloc_kmem: agp_fill_empty_keyent error")); 2184 2185 goto err1; 2186 } 2187 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 2188 2189 return (entryp); 2190 2191 err1: 2192 kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages); 2193 keyentry.kte_pfnarray = NULL; 2194 (void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle); 2195 KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0; 2196 err2: 2197 ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl); 2198 KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL; 2199 KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0; 2200 KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL; 2201 err3: 2202 ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle)); 2203 
KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL; 2204 err4: 2205 kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t)); 2206 keyentry.kte_memhdl = NULL; 2207 return (NULL); 2208 2209 } 2210 2211 /* 2212 * agp_alloc_mem() 2213 * 2214 * Description: 2215 * This function allocate physical memory for userland applications, 2216 * in order to save kernel virtual space, we use the direct mapping 2217 * memory interface if it is available. 2218 * 2219 * Arguments: 2220 * st driver soft state pointer 2221 * length memory size 2222 * type AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical 2223 * memory type for intel i810 IGD 2224 * 2225 * Returns: 2226 * NULL Invalid memory type or can not allocate memory 2227 * Keytable entry pointer returned by agp_alloc_kmem or agp_alloc_pmem 2228 */ 2229 static keytable_ent_t * 2230 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type) 2231 { 2232 2233 /* 2234 * AGP_PHYSICAL type require contiguous physical pages exported 2235 * to X drivers, like i810 HW cursor, ARGB cursor. the number of 2236 * pages needed is usuallysmall and contiguous, 4K, 16K. So we 2237 * use DDI interface to allocated such memory. And X use xsvc 2238 * drivers to map this memory into its own address space. 2239 */ 2240 ASSERT(st); 2241 2242 switch (type) { 2243 case AGP_NORMAL: 2244 return (agp_alloc_pmem(st, length, type)); 2245 case AGP_PHYSICAL: 2246 return (agp_alloc_kmem(st, length)); 2247 default: 2248 return (NULL); 2249 } 2250 } 2251 2252 /* 2253 * free_gart_table() 2254 * 2255 * Description: 2256 * This function frees the gart table memory allocated by driver. 2257 * Must disable gart table before calling this function. 
2258 * 2259 * Arguments: 2260 * softstate driver soft state pointer 2261 * 2262 */ 2263 static void 2264 free_gart_table(agpgart_softstate_t *st) 2265 { 2266 2267 if (st->gart_dma_handle == NULL) 2268 return; 2269 2270 (void) ddi_dma_unbind_handle(st->gart_dma_handle); 2271 ddi_dma_mem_free(&st->gart_dma_acc_handle); 2272 st->gart_dma_acc_handle = NULL; 2273 ddi_dma_free_handle(&st->gart_dma_handle); 2274 st->gart_dma_handle = NULL; 2275 st->gart_vbase = 0; 2276 st->gart_size = 0; 2277 } 2278 2279 /* 2280 * alloc_gart_table() 2281 * 2282 * Description: 2283 * This function allocates one physical continuous gart table. 2284 * INTEL integrated video device except i810 have their special 2285 * video bios; No need to allocate gart table for them. 2286 * 2287 * Arguments: 2288 * st driver soft state pointer 2289 * 2290 * Returns: 2291 * 0 success 2292 * -1 can not allocate gart tabl 2293 */ 2294 static int 2295 alloc_gart_table(agpgart_softstate_t *st) 2296 { 2297 int num_pages; 2298 size_t table_size; 2299 int ret = DDI_SUCCESS; 2300 ddi_dma_cookie_t cookie; 2301 uint32_t num_cookies; 2302 2303 num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize); 2304 2305 /* 2306 * Only 40-bit maximum physical memory is supported by today's 2307 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses). 2308 * No one supports 64-bit gart entries now, so the size of gart 2309 * entries defaults to 32-bit though AGP3.0 specifies the possibility 2310 * of 64-bit gart entries. 
2311 */ 2312 2313 table_size = num_pages * (sizeof (uint32_t)); 2314 2315 /* 2316 * Only AMD64 can put gart table above 4G, 40 bits at maximum 2317 */ 2318 if ((st->asoft_devreg.agprd_arctype == ARC_AMD64AGP) || 2319 (st->asoft_devreg.agprd_arctype == ARC_AMD64NOAGP)) 2320 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL; 2321 else 2322 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU; 2323 /* Allocate physical continuous page frame for gart table */ 2324 if (ret = ddi_dma_alloc_handle(st->asoft_dip, 2325 &garttable_dma_attr, 2326 DDI_DMA_SLEEP, 2327 NULL, &st->gart_dma_handle)) { 2328 AGPDB_PRINT2((CE_WARN, 2329 "alloc_gart_table: ddi_dma_alloc_handle failed")); 2330 goto err3; 2331 } 2332 2333 if (ret = ddi_dma_mem_alloc(st->gart_dma_handle, 2334 table_size, 2335 &gart_dev_acc_attr, 2336 DDI_DMA_CONSISTENT, 2337 DDI_DMA_SLEEP, NULL, 2338 &st->gart_vbase, 2339 &st->gart_size, 2340 &st->gart_dma_acc_handle)) { 2341 AGPDB_PRINT2((CE_WARN, 2342 "alloc_gart_table: ddi_dma_mem_alloc failed")); 2343 goto err2; 2344 2345 } 2346 2347 ret = ddi_dma_addr_bind_handle(st->gart_dma_handle, 2348 NULL, st->gart_vbase, 2349 table_size, 2350 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2351 DDI_DMA_SLEEP, NULL, 2352 &cookie, &num_cookies); 2353 2354 st->gart_pbase = cookie.dmac_address; 2355 2356 if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) { 2357 if (num_cookies > 1) 2358 (void) ddi_dma_unbind_handle(st->gart_dma_handle); 2359 AGPDB_PRINT2((CE_WARN, 2360 "alloc_gart_table: alloc contiguous phys memory failed")); 2361 goto err1; 2362 } 2363 2364 return (0); 2365 err1: 2366 ddi_dma_mem_free(&st->gart_dma_acc_handle); 2367 st->gart_dma_acc_handle = NULL; 2368 err2: 2369 ddi_dma_free_handle(&st->gart_dma_handle); 2370 st->gart_dma_handle = NULL; 2371 err3: 2372 st->gart_pbase = 0; 2373 st->gart_size = 0; 2374 st->gart_vbase = 0; 2375 2376 return (-1); 2377 } 2378 2379 /* 2380 * agp_add_to_gart() 2381 * 2382 * Description: 2383 * This function fills the gart table entries by a given 
 * page frame number
 * array and set up the agp aperture page to physical memory page
 * translation.
 * Arguments:
 * 	type		valid system arc types ARC_INTELAGP,
 * 			ARC_AMD64NOAGP, ARC_AMD64AGP
 * 	pfnarray	allocated physical page frame number array
 * 	pg_offset	agp aperture start page to be bound
 * 	entries		the number of pages to be bound
 * 	dma_hdl		gart table dma memory handle
 * 	tablep		gart table kernel virtual address
 * Returns:
 * 	-1		failed
 * 	0		success
 */
static int
agp_add_to_gart(
    agp_arc_type_t type,
    pfn_t *pfnarray,
    uint32_t pg_offset,
    uint32_t entries,
    ddi_dma_handle_t dma_hdl,
    uint32_t *tablep)
{
	int items = 0;
	uint32_t *entryp;
	uint32_t itemv;

	entryp = tablep + pg_offset;
	/*
	 * Translate each pfn into an arch-specific gart entry; stop on
	 * the first pfn that pfn2gartentry() rejects, leaving
	 * items < entries as the failure indication below.
	 */
	while (items < entries) {
		if (pfn2gartentry(type, pfnarray[items], &itemv))
			break;
		*(entryp + items) = itemv;
		items++;
	}
	if (items < entries)
		return (-1);

	/* Make the new gart entries visible to the device */
	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);

	return (0);
}

/*
 * agp_bind_key()
 *
 * Description:
 *	This function will call low level gart table access functions to
 *	set up gart table translation. Also it will do some sanity
 *	checking on key table entry.
 *
 * Arguments:
 * 	softstate		driver soft state pointer
 * 	keyent			key table entity pointer to be bound
 * 	pg_offset		aperture start page to be bound
 * Returns:
 * 	0			success
 * 	EINVAL			not a valid operation
 * 	EIO			i8xx GTT programming failed
 */
static int
agp_bind_key(agpgart_softstate_t *softstate,
    keytable_ent_t  *keyent, uint32_t  pg_offset)
{
	uint64_t pg_end;
	int ret = 0;

	ASSERT(keyent);
	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
	ASSERT(mutex_owned(&softstate->asoft_instmutex));

	pg_end = pg_offset + keyent->kte_pages;

	/* The whole range must fit inside the aperture */
	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: key=0x%x,exceed aper range",
		    keyent->kte_key));

		return (EINVAL);
	}

	/* Reject ranges overlapping an existing binding in the key table */
	if (agp_check_off(softstate->asoft_table,
	    pg_offset, keyent->kte_pages)) {
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
		    pg_offset, keyent->kte_pages));
		return (EINVAL);
	}

	ASSERT(keyent->kte_pfnarray != NULL);

	switch (softstate->asoft_devreg.agprd_arctype) {
	case ARC_IGD810:
	case ARC_IGD830:
		/* Integrated graphics: program the GTT via the target layer */
		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
		    &softstate->asoft_devreg);
		if (ret)
			return (EIO);
		break;
	case ARC_INTELAGP:
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
		/* Driver-owned gart table: write the entries directly */
		ret = agp_add_to_gart(
		    softstate->asoft_devreg.agprd_arctype,
		    keyent->kte_pfnarray,
		    pg_offset,
		    keyent->kte_pages,
		    softstate->gart_dma_handle,
		    (uint32_t *)softstate->gart_vbase);
		if (ret)
			return (EINVAL);
		/* Flush GTLB table */
		lyr_flush_gart_cache(&softstate->asoft_devreg);
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agp_bind_key: arc type = 0x%x unsupported",
		    softstate->asoft_devreg.agprd_arctype));
		return (EINVAL);
	}
	return (0);
}

/*
 * agpgart_attach()
 *
 * Driver attach entry point: allocates per-instance soft state, sets up
 * the instance mutex, LDI identifier, kstats, the /dev/agpgart minor node
 * and the key table. Error paths unwind in reverse order via err labels.
 * NOTE(review): asoft_instmutex is not mutex_destroy()ed on the error
 * paths after mutex_init — confirm whether that matters for this platform.
 */
static int
agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *softstate;

	if (cmd != DDI_ATTACH) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: only attach op supported"));
		return (DDI_FAILURE);
	}
	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
	    != DDI_SUCCESS) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: soft state zalloc failed"));
		goto err1;

	}
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
	softstate->asoft_dip = dip;
	/*
	 * Allocate LDI identifier for agpgart driver
	 * Agpgart driver is the kernel consumer
	 */
	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: LDI indentifier allcation failed"));
		goto err2;
	}

	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
	/* Install agp kstat */
	if (agp_init_kstats(softstate)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
		goto err3;
	}
	/*
	 * devfs will create /dev/agpgart
	 * and /devices/agpgart:agpgart
	 */

	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
	    AGP_INST2MINOR(instance),
	    DDI_NT_AGP_PSEUDO, 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_attach: Can not create minor node"));
		goto err4;
	}

	/* Key table tracks all userland allocations for this instance */
	softstate->asoft_table = kmem_zalloc(
	    AGP_MAXKEYS * (sizeof (keytable_ent_t)),
	    KM_SLEEP);

	return (DDI_SUCCESS);
err4:
	agp_fini_kstats(softstate);
err3:
	ldi_ident_release(softstate->asoft_li);
err2:
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
err1:
	return (DDI_FAILURE);
}

/*
 * agpgart_detach()
 *
 * Driver detach entry point: releases any keys userland left allocated,
 * then tears down the key table, minor node, kstats, LDI identifier,
 * mutex and soft state.
 */
static int
agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	agpgart_softstate_t *st;

	instance =
ddi_get_instance(dip);

	st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	/*
	 * Caller should free all the memory allocated explicitly.
	 * We release the memory allocated by caller which is not
	 * properly freed. mutex_enter here make sure assertion on
	 * softstate mutex success in agp_dealloc_mem.
	 */
	mutex_enter(&st->asoft_instmutex);
	if (agp_del_allkeys(st)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
		AGPDB_PRINT2((CE_WARN,
		    "you might free agp memory exported to your applications"));

		mutex_exit(&st->asoft_instmutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&st->asoft_instmutex);
	if (st->asoft_table) {
		kmem_free(st->asoft_table,
		    AGP_MAXKEYS * (sizeof (keytable_ent_t)));
		st->asoft_table = 0;
	}

	/* Undo agpgart_attach() in reverse order */
	ddi_remove_minor_node(dip, AGPGART_DEVNODE);
	agp_fini_kstats(st);
	ldi_ident_release(st->asoft_li);
	mutex_destroy(&st->asoft_instmutex);
	ddi_soft_state_free(agpgart_glob_soft_handle, instance);

	return (DDI_SUCCESS);
}

/*
 * agpgart_getinfo()
 *
 * Standard getinfo(9E) entry point: translates a dev_t into the
 * corresponding dev_info pointer or instance number.
 */
/*ARGSUSED*/
static int
agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **resultp)
{
	agpgart_softstate_t *st;
	int instance, rval = DDI_FAILURE;
	dev_t dev;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		dev = (dev_t)arg;
		instance = AGP_DEV2INST(dev);
		st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
		if (st != NULL) {
			mutex_enter(&st->asoft_instmutex);
			*resultp = st->asoft_dip;
			mutex_exit(&st->asoft_instmutex);
			rval = DDI_SUCCESS;
		} else
			*resultp = NULL;

		break;
	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = AGP_DEV2INST(dev);
		*resultp = (void *)(uintptr_t)instance;
		rval = DDI_SUCCESS;

		break;
	default:
		break;
	}

	return (rval);
}

/*
 * agpgart_open()
 *
 * Description:
 * 	This function is the driver open entry point. If it is the
 * 	first time the agpgart driver is opened, the driver will
 * 	open other agp related layered drivers and set up the agpgart
 * 	table properly.
 *
 * Arguments:
 * 	dev			device number pointer
 * 	openflags		open flags
 * 	otyp			OTYP_BLK, OTYP_CHR
 * 	credp			user's credential's struct pointer
 *
 * Returns:
 * 	ENXIO			operation error
 * 	EAGAIN			resource temporarily unavailable
 * 	0			success
 */
/*ARGSUSED*/
static int
agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
{
	int instance = AGP_DEV2INST(*dev);
	agpgart_softstate_t *softstate;
	int rc = 0;

	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
		return (ENXIO);
	}
	mutex_enter(&softstate->asoft_instmutex);

	/* Subsequent opens only bump the reference count */
	if (softstate->asoft_opened) {
		softstate->asoft_opened++;
		mutex_exit(&softstate->asoft_instmutex);
		return (0);
	}

	/*
	 * The driver is opened first time, so we initialize layered
	 * driver interface and softstate member here.
	 */
	softstate->asoft_pgused = 0;
	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
		mutex_exit(&softstate->asoft_instmutex);
		return (EAGAIN);
	}

	/* Call into layered driver */
	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}

	/*
	 * BIOS already set up gtt table for ARC_IGD830,
	 * so no driver-allocated gart table is needed for it.
	 */
	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
		softstate->asoft_opened++;

		softstate->asoft_pgtotal =
		    get_max_pages(softstate->asoft_info.agpki_apersize);

		if (lyr_config_devices(&softstate->asoft_devreg)) {
			AGPDB_PRINT2((CE_WARN,
			    "agpgart_open: lyr_config_devices error"));
			lyr_end(&softstate->asoft_devreg);
			mutex_exit(&softstate->asoft_instmutex);

			return (EIO);
		}
		mutex_exit(&softstate->asoft_instmutex);
		return (0);
	}

	rc = alloc_gart_table(softstate);

	/*
	 * Allocate physically contiguous pages for AGP arc or
	 * i810 arc. If failed, divide aper_size by 2 to
	 * reduce gart table size until 4 megabytes. This
	 * is just a workaround for systems with very few
	 * physically contiguous memory.
	 */
	if (rc) {
		while ((softstate->asoft_info.agpki_apersize >= 4) &&
		    (alloc_gart_table(softstate))) {
			softstate->asoft_info.agpki_apersize >>= 1;
		}
		/* Succeeded only if we stopped before shrinking below 4MB */
		if (softstate->asoft_info.agpki_apersize >= 4)
			rc = 0;
	}

	if (rc != 0) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: alloc gart table failed"));
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EAGAIN);
	}

	softstate->asoft_pgtotal =
	    get_max_pages(softstate->asoft_info.agpki_apersize);
	/*
	 * BIOS doesn't initialize GTT for i810,
	 * So i810 GTT must be created by driver.
	 *
	 * Set up gart table and enable it.
	 */
	if (lyr_set_gart_addr(softstate->gart_pbase,
	    &softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: set gart table addr failed"));
		free_gart_table(softstate);
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}
	if (lyr_config_devices(&softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_open: lyr_config_devices failed"));
		free_gart_table(softstate);
		lyr_end(&softstate->asoft_devreg);
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}

	softstate->asoft_opened++;
	mutex_exit(&softstate->asoft_instmutex);

	return (0);
}

/*
 * agpgart_close()
 *
 * Description:
 *	agpgart_close will release resources allocated in the first open
 *	and close other open layered drivers. Also it frees the memory
 *	allocated by ioctls.
 *
 * Arguments:
 * 	dev			device number
 * 	flag			file status flag
 * 	otyp			OTYP_BLK, OTYP_CHR
 * 	credp			user's credential's struct pointer
 *
 * Returns:
 * 	ENXIO			not an error, to support "deferred attach"
 * 	0			success
 */
/*ARGSUSED*/
static int
agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int instance = AGP_DEV2INST(dev);
	agpgart_softstate_t *softstate;

	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
		return (ENXIO);
	}

	mutex_enter(&softstate->asoft_instmutex);
	ASSERT(softstate->asoft_opened);


	/*
	 * If the last process close this device is not the controlling
	 * process, also release the control over agpgart driver here if the
	 * the controlling process fails to release the control before it
	 * close the driver.
	 */
	if (softstate->asoft_acquired == 1) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_close: auto release control over driver"));
		release_control(softstate);
	}

	if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_close: lyr_unconfig_device error"));
		mutex_exit(&softstate->asoft_instmutex);
		return (EIO);
	}
	softstate->asoft_agpen = 0;

	/* The i830 gart table is BIOS-owned and is never freed by us */
	if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
		free_gart_table(softstate);
	}

	lyr_end(&softstate->asoft_devreg);

	/*
	 * This statement must be positioned before agp_del_allkeys
	 * agp_dealloc_mem indirectly called by agp_del_allkeys
	 * will test this variable.
	 */
	softstate->asoft_opened = 0;

	/*
	 * Free the memory allocated by user applications which
	 * was never deallocated.
	 */
	(void) agp_del_allkeys(softstate);

	mutex_exit(&softstate->asoft_instmutex);

	return (0);
}

/*
 * ioctl_agpgart_info()
 *
 * Copies driver/aperture information out to userland, converting to
 * the 32-bit layout for ILP32 callers when _MULTI_DATAMODEL is built in.
 */
static int
ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
{
	agp_info_t infostruct;
#ifdef _MULTI_DATAMODEL
	agp_info32_t infostruct32;
#endif

	bzero(&infostruct, sizeof (agp_info_t));

#ifdef _MULTI_DATAMODEL
	bzero(&infostruct32, sizeof (agp_info32_t));
	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
		if (copyinfo(softstate, &infostruct))
			return (EINVAL);

		agpinfo_default_to_32(infostruct, infostruct32);
		if (ddi_copyout(&infostruct32, arg,
		    sizeof (agp_info32_t), flags) != 0)
			return (EFAULT);

		return (0);
	}
#endif /* _MULTI_DATAMODEL */
	if (copyinfo(softstate, &infostruct))
		return (EINVAL);

	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
		return (EFAULT);
	}

	return (0);
}

/*
 * ioctl_agpgart_acquire()
 *
 * Grants the calling process exclusive control over the driver; fails
 * with EBUSY if some process already holds it.
 */
static int
ioctl_agpgart_acquire(agpgart_softstate_t  *st)
{
	if (st->asoft_acquired) {
		AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
		return (EBUSY);
	}
	acquire_control(st);
	return (0);
}

/*
 * ioctl_agpgart_release()
 *
 * Releases control over the driver; only the controlling process may do so.
 */
static int
ioctl_agpgart_release(agpgart_softstate_t  *st)
{
	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_release: not a controlling process"));
		return (EPERM);
	}
	release_control(st);
	return (0);
}

/*
 * ioctl_agpgart_setup()
 *
 * Programs the requested AGP mode; only valid on true AGP bridges
 * (ARC_INTELAGP / ARC_AMD64AGP) and only for the controlling process.
 */
static int
ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
{
	agp_setup_t data;
	int rc = 0;

	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_setup: not a controlling process"));
		return (EPERM);
	}

	if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_setup: no true agp bridge"));
		return (EINVAL);
	}

	if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
		return (EFAULT);

	if (rc = agp_setup(st, data.agps_mode))
		return (rc);
	/* Store agp mode status for kstat */
	st->asoft_agpen = 1;
	return (0);
}

/*
 * ioctl_agpgart_alloc()
 *
 * Allocates AGP memory on behalf of the controlling process, enforcing
 * the per-instance page budget, and copies the resulting key (and, for
 * AGP_PHYSICAL, the physical address) back to userland.
 */
static int
ioctl_agpgart_alloc(agpgart_softstate_t  *st, void *arg, int flags)
{
	agp_allocate_t alloc_info;
	keytable_ent_t *entryp;
	size_t length;
	uint64_t pg_num;

	if (is_controlling_proc(st) < 0) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: not a controlling process"));
		return (EPERM);
	}

	if (ddi_copyin(arg, &alloc_info,
	    sizeof (agp_allocate_t), flags) != 0) {
		return (EFAULT);
	}
	/* Enforce the overall page budget before allocating */
	pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
	if (pg_num > st->asoft_pgtotal) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: exceeding the memory pages limit"));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: request %x pages failed",
		    alloc_info.agpa_pgcount));
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: pages used %x total is %x",
		    st->asoft_pgused, st->asoft_pgtotal));

		return (EINVAL);
	}

	length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
	entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
	if (!entryp) {
		AGPDB_PRINT2((CE_WARN,
		    "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
		    length));
		return (ENOMEM);
	}
	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
	alloc_info.agpa_key = entryp->kte_key;
	if (alloc_info.agpa_type == AGP_PHYSICAL) {
		/* Physical memory is contiguous: export its base address */
		alloc_info.agpa_physical =
		    (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
	}
	/* Update the memory pages used */
	st->asoft_pgused += alloc_info.agpa_pgcount;

	if (ddi_copyout(&alloc_info, arg,
	    sizeof (agp_allocate_t), flags) != 0) {

		return (EFAULT);
	}

return (0); 3012 } 3013 3014 static int 3015 ioctl_agpgart_dealloc(agpgart_softstate_t *st, intptr_t arg) 3016 { 3017 int key; 3018 keytable_ent_t *keyent; 3019 3020 if (is_controlling_proc(st) < 0) { 3021 AGPDB_PRINT2((CE_WARN, 3022 "ioctl_agpgart_dealloc: not a controlling process")); 3023 return (EPERM); 3024 } 3025 key = (int)arg; 3026 if ((key >= AGP_MAXKEYS) || key < 0) { 3027 return (EINVAL); 3028 } 3029 keyent = &st->asoft_table[key]; 3030 if (!keyent->kte_memhdl) { 3031 return (EINVAL); 3032 } 3033 3034 if (agp_dealloc_mem(st, keyent)) 3035 return (EINVAL); 3036 3037 /* Update the memory pages used */ 3038 st->asoft_pgused -= keyent->kte_pages; 3039 bzero(keyent, sizeof (keytable_ent_t)); 3040 3041 return (0); 3042 } 3043 3044 static int 3045 ioctl_agpgart_bind(agpgart_softstate_t *st, void *arg, int flags) 3046 { 3047 agp_bind_t bind_info; 3048 keytable_ent_t *keyent; 3049 int key; 3050 uint32_t pg_offset; 3051 int retval = 0; 3052 3053 if (is_controlling_proc(st) < 0) { 3054 AGPDB_PRINT2((CE_WARN, 3055 "ioctl_agpgart_bind: not a controlling process")); 3056 return (EPERM); 3057 } 3058 3059 if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) { 3060 return (EFAULT); 3061 } 3062 3063 key = bind_info.agpb_key; 3064 if ((key >= AGP_MAXKEYS) || key < 0) { 3065 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key")); 3066 return (EINVAL); 3067 } 3068 3069 if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) { 3070 if (AGP_PAGES2KB(bind_info.agpb_pgstart) < 3071 st->asoft_info.agpki_presize) { 3072 AGPDB_PRINT2((CE_WARN, 3073 "ioctl_agpgart_bind: bind to prealloc area")); 3074 return (EINVAL); 3075 } 3076 } 3077 3078 pg_offset = bind_info.agpb_pgstart; 3079 keyent = &st->asoft_table[key]; 3080 if (!keyent->kte_memhdl) { 3081 AGPDB_PRINT2((CE_WARN, 3082 "ioctl_agpgart_bind: Key = 0x%x can't get keyenty", 3083 key)); 3084 return (EINVAL); 3085 } 3086 3087 if (keyent->kte_bound != 0) { 3088 AGPDB_PRINT2((CE_WARN, 3089 "ioctl_agpgart_bind: Key = 0x%x 
already bound", 3090 key)); 3091 return (EINVAL); 3092 } 3093 retval = agp_bind_key(st, keyent, pg_offset); 3094 3095 if (retval == 0) { 3096 keyent->kte_pgoff = pg_offset; 3097 keyent->kte_bound = 1; 3098 } 3099 3100 return (retval); 3101 } 3102 3103 static int 3104 ioctl_agpgart_unbind(agpgart_softstate_t *st, void *arg, int flags) 3105 { 3106 int key, retval = 0; 3107 agp_unbind_t unbindinfo; 3108 keytable_ent_t *keyent; 3109 3110 if (is_controlling_proc(st) < 0) { 3111 AGPDB_PRINT2((CE_WARN, 3112 "ioctl_agpgart_bind: not a controlling process")); 3113 return (EPERM); 3114 } 3115 3116 if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) { 3117 return (EFAULT); 3118 } 3119 key = unbindinfo.agpu_key; 3120 if ((key >= AGP_MAXKEYS) || key < 0) { 3121 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key")); 3122 return (EINVAL); 3123 } 3124 keyent = &st->asoft_table[key]; 3125 if (!keyent->kte_bound) { 3126 return (EINVAL); 3127 } 3128 3129 if ((retval = agp_unbind_key(st, keyent)) != 0) 3130 return (retval); 3131 3132 return (0); 3133 } 3134 3135 /*ARGSUSED*/ 3136 static int 3137 agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags, 3138 cred_t *credp, int *rvalp) 3139 { 3140 int instance; 3141 int retval = 0; 3142 void *arg = (void*)intarg; 3143 3144 agpgart_softstate_t *softstate; 3145 3146 instance = AGP_DEV2INST(dev); 3147 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 3148 if (softstate == NULL) { 3149 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err")); 3150 return (ENXIO); 3151 } 3152 3153 if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) { 3154 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied")); 3155 return (EPERM); 3156 } 3157 3158 mutex_enter(&softstate->asoft_instmutex); 3159 3160 switch (cmd) { 3161 case AGPIOC_INFO: 3162 retval = ioctl_agpgart_info(softstate, arg, flags); 3163 break; 3164 case AGPIOC_ACQUIRE: 3165 retval = ioctl_agpgart_acquire(softstate); 3166 break; 3167 case 
AGPIOC_RELEASE:
		retval = ioctl_agpgart_release(softstate);
		break;
	case AGPIOC_SETUP:
		retval = ioctl_agpgart_setup(softstate, arg, flags);
		break;
	case AGPIOC_ALLOCATE:
		retval = ioctl_agpgart_alloc(softstate, arg, flags);
		break;
	case AGPIOC_DEALLOCATE:
		retval = ioctl_agpgart_dealloc(softstate, intarg);
		break;
	case AGPIOC_BIND:
		retval = ioctl_agpgart_bind(softstate, arg, flags);
		break;
	case AGPIOC_UNBIND:
		retval = ioctl_agpgart_unbind(softstate, arg, flags);
		break;
	default:
		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
		retval = ENXIO;
		break;
	}

	mutex_exit(&softstate->asoft_instmutex);
	return (retval);
}

/*
 * agpgart_segmap()
 *
 * segmap(9E) entry point: checks the privilege and the page alignment
 * of the requested length, then hands the mapping off to devmap_setup(),
 * which in turn calls agpgart_devmap() below.
 */
static int
agpgart_segmap(dev_t dev, off_t off, struct as *asp,
    caddr_t *addrp, off_t len, unsigned int prot,
    unsigned int maxprot, unsigned int flags, cred_t *credp)
{

	struct agpgart_softstate *softstate;
	int instance;
	int rc = 0;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
		return (ENXIO);
	}
	/* Mapping length must be a whole number of AGP pages */
	if (!AGP_ALIGNED(len))
		return (EINVAL);

	mutex_enter(&softstate->asoft_instmutex);

	/*
	 * Process must have gart map privilege or gart access privilege
	 * to map agp memory.
	 */
	if (secpolicy_gart_map(credp)) {
		mutex_exit(&softstate->asoft_instmutex);
		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied"));
		return (EPERM);
	}

	rc = devmap_setup(dev, (offset_t)off, asp, addrp,
	    (size_t)len, prot, maxprot, flags, credp);

	mutex_exit(&softstate->asoft_instmutex);
	return (rc);
}

/*
 * agpgart_devmap()
 *
 * devmap(9E) entry point: translates an aperture offset into the bound
 * key table entry covering it, clamps the length to that entry, and
 * exports the pages to userland with write-combining enabled.
 */
/*ARGSUSED*/
static int
agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
    size_t *mappedlen, uint_t model)
{
	struct agpgart_softstate *softstate;
	int instance, status;
	struct keytable_ent *mementry;
	offset_t local_offset;

	instance = AGP_DEV2INST(dev);
	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
	if (softstate == NULL) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
		return (ENXIO);
	}


	/* The offset must lie inside the aperture */
	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
		return (EINVAL);
	}

	/*
	 * Can not find any memory now, so fail.
	 */

	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));

	if (mementry == NULL) {
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: can not find the proper keyent"));
		return (EINVAL);
	}

	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);

	/* Clamp the mapping to the extent of this key table entry */
	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
	}

	switch (mementry->kte_type) {
	case AGP_NORMAL:
		status = devmap_pmem_setup(cookie, softstate->asoft_dip,
		    &agp_devmap_cb,
		    PMEMP(mementry->kte_memhdl)->pmem_cookie, local_offset,
		    len, PROT_ALL, (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
		    &mem_dev_acc_attr);
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: not a valid memory type"));
		return (EINVAL);
	}


	if (status == 0) {
		*mappedlen = len;
	} else {
		*mappedlen = 0;
		AGPDB_PRINT2((CE_WARN,
		    "agpgart_devmap: devmap interface failed"));
		return (EINVAL);
	}

	return (0);
}

static struct cb_ops	agpgart_cb_ops = {
	agpgart_open,		/* open() */
	agpgart_close,		/* close() */
	nodev,			/* strategy() */
	nodev,			/* print routine */
	nodev,			/* no dump routine */
	nodev,			/* read() */
	nodev,			/* write() */
	agpgart_ioctl,		/* agpgart_ioctl */
	agpgart_devmap,		/* devmap routine */
	nodev,			/* no longer use mmap routine */
	agpgart_segmap,		/* system segmap routine */
	nochpoll,		/* no chpoll routine */
	ddi_prop_op,		/* system prop operations */
	0,			/* not a STREAMS driver */
	D_DEVMAP | D_MP,	/* safe for multi-thread/multi-processor */
	CB_REV,			/* cb_ops version?
*/ 3318 nodev, /* cb_aread() */ 3319 nodev, /* cb_awrite() */ 3320 }; 3321 3322 static struct dev_ops agpgart_ops = { 3323 DEVO_REV, /* devo_rev */ 3324 0, /* devo_refcnt */ 3325 agpgart_getinfo, /* devo_getinfo */ 3326 nulldev, /* devo_identify */ 3327 nulldev, /* devo_probe */ 3328 agpgart_attach, /* devo_attach */ 3329 agpgart_detach, /* devo_detach */ 3330 nodev, /* devo_reset */ 3331 &agpgart_cb_ops, /* devo_cb_ops */ 3332 (struct bus_ops *)0, /* devo_bus_ops */ 3333 NULL, /* devo_power */ 3334 }; 3335 3336 static struct modldrv modldrv = { 3337 &mod_driverops, 3338 "AGP driver v%I%", 3339 &agpgart_ops, 3340 }; 3341 3342 static struct modlinkage modlinkage = { 3343 MODREV_1, /* MODREV_1 is indicated by manual */ 3344 {&modldrv, NULL, NULL, NULL} 3345 }; 3346 3347 static void *agpgart_glob_soft_handle; 3348 3349 int 3350 _init(void) 3351 { 3352 int ret = DDI_SUCCESS; 3353 3354 ret = ddi_soft_state_init(&agpgart_glob_soft_handle, 3355 sizeof (agpgart_softstate_t), 3356 AGPGART_MAX_INSTANCES); 3357 3358 if (ret != 0) { 3359 AGPDB_PRINT2((CE_WARN, 3360 "_init: soft state init error code=0x%x", ret)); 3361 return (ret); 3362 } 3363 3364 if ((ret = mod_install(&modlinkage)) != 0) { 3365 AGPDB_PRINT2((CE_WARN, 3366 "_init: mod install error code=0x%x", ret)); 3367 ddi_soft_state_fini(&agpgart_glob_soft_handle); 3368 return (ret); 3369 } 3370 3371 return (DDI_SUCCESS); 3372 } 3373 3374 int 3375 _info(struct modinfo *modinfop) 3376 { 3377 return (mod_info(&modlinkage, modinfop)); 3378 } 3379 3380 int 3381 _fini(void) 3382 { 3383 int ret; 3384 3385 if ((ret = mod_remove(&modlinkage)) == 0) { 3386 ddi_soft_state_fini(&agpgart_glob_soft_handle); 3387 } 3388 3389 return (ret); 3390 } 3391