/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Portions Philip Brown phil@bolthole.com Dec 2001
 */


/*
 * agpgart driver
 *
 * This driver is primarily targeted at providing memory support for Intel
 * AGP devices, Intel memory-less video cards, and AMD64 CPU GART devices.
 * So there are four main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
 * ARC_AMD64AGP, known to the agpgart driver.  However, the memory
 * interfaces are the same for these architectures.  The difference is how to
 * manage the hardware GART table for each of them.
 *
 * For large memory allocations, this driver uses direct mapping to the
 * userland application interface to save kernel virtual memory.
 */

#include <sys/types.h>
#include <sys/pci.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kstat.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/policy.h>
#include <sys/ddidevmap.h>
#include <vm/seg_dev.h>
#include <sys/pmem.h>
#include <sys/agpgart.h>
#include <sys/agp/agpdefs.h>
#include <sys/agp/agpgart_impl.h>
#include <sys/agp/agpamd64gart_io.h>
#include <sys/agp/agpmaster_io.h>
#include <sys/agp/agptarget_io.h>

/*
 * Dynamic debug support:
 * PRINT1 fires only at level 1 (notes), PRINT2 at any non-zero level
 * (warnings).  "fmt" is a parenthesized cmn_err argument list.
 */
int agp_debug_var = 0;
#define	AGPDB_PRINT1(fmt)	if (agp_debug_var == 1) cmn_err fmt
#define	AGPDB_PRINT2(fmt)	if (agp_debug_var >= 1) cmn_err fmt

/* Driver global softstate handle */
static void *agpgart_glob_soft_handle;

/* Highest driver instance number probed when registering device nodes */
#define	MAX_INSTNUM			16

/* The instance number lives in the minor number above the low 4 bits */
#define	AGP_DEV2INST(devt)	(getminor((devt)) >> 4)
#define	AGP_INST2MINOR(instance)	((instance) << 4)
#define	IS_INTEL_830(type)	((type) == ARC_IGD830)
#define	IS_TRUE_AGP(type)	(((type) == ARC_INTELAGP) || \
	((type) == ARC_AMD64AGP))

/* Narrow a native agp_info to the 32-bit ioctl layout for ILP32 callers */
#define	agpinfo_default_to_32(v, v32)	\
	{	\
		(v32).agpi32_version = (v).agpi_version;	\
		(v32).agpi32_devid = (v).agpi_devid;	\
		(v32).agpi32_mode = (v).agpi_mode;	\
		(v32).agpi32_aperbase = (uint32_t)(v).agpi_aperbase;	\
		(v32).agpi32_apersize = (uint32_t)(v).agpi_apersize;	\
		(v32).agpi32_pgtotal = (v).agpi_pgtotal;	\
		(v32).agpi32_pgsystem = (v).agpi_pgsystem;	\
		(v32).agpi32_pgused = (v).agpi_pgused;	\
	}

/* DMA attributes for AGP memory allocations (32-bit addressable, page aligned) */
static ddi_dma_attr_t agpgart_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/*
 * AMD64 supports gart table above 4G. See alloc_gart_table.
 */
static ddi_dma_attr_t garttable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/*
 * The AGPGART table needs physically contiguous memory.  To assure that
 * each access to the gart table is strongly ordered and uncachable,
 * we use DDI_STRICTORDER_ACC.
 */
static ddi_device_acc_attr_t gart_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};

/*
 * AGP memory is usually used as texture memory or for a framebuffer, so we
 * can set the memory attribute to write combining.
Video drivers will 121 * determine the frame buffer attributes, for example the memory is write 122 * combinging or non-cachable. However, the interface between Xorg and agpgart 123 * driver to support attribute selcetion doesn't exist yet. So we set agp memory 124 * to non-cachable by default now. This attribute might be overridden 125 * by MTTR in X86. 126 */ 127 static ddi_device_acc_attr_t mem_dev_acc_attr = { 128 DDI_DEVICE_ATTR_V0, 129 DDI_NEVERSWAP_ACC, 130 DDI_STRICTORDER_ACC /* Can be DDI_MERGING_OK_ACC */ 131 }; 132 133 static keytable_ent_t * 134 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset); 135 static void 136 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts); 137 138 139 static void 140 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate, 141 offset_t off, size_t len, devmap_cookie_t new_handle1, 142 void **new_devprivate1, devmap_cookie_t new_handle2, 143 void **new_devprivate2) 144 { 145 146 struct keytable_ent *mementry; 147 agpgart_softstate_t *softstate; 148 agpgart_ctx_t *ctxp, *newctxp1, *newctxp2; 149 150 ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off)); 151 ASSERT(devprivate); 152 ASSERT(handle); 153 154 ctxp = (agpgart_ctx_t *)devprivate; 155 softstate = ctxp->actx_sc; 156 ASSERT(softstate); 157 158 if (new_handle1 != NULL) { 159 newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 160 newctxp1->actx_sc = softstate; 161 newctxp1->actx_off = ctxp->actx_off; 162 *new_devprivate1 = newctxp1; 163 } 164 165 if (new_handle2 != NULL) { 166 newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 167 newctxp2->actx_sc = softstate; 168 newctxp2->actx_off = off + len; 169 *new_devprivate2 = newctxp2; 170 } 171 172 mutex_enter(&softstate->asoft_instmutex); 173 if ((new_handle1 == NULL) && (new_handle2 == NULL)) { 174 mementry = 175 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off)); 176 ASSERT(mementry); 177 mementry->kte_refcnt--; 178 } else if ((new_handle1 != NULL) && (new_handle2 != NULL)) { 179 mementry = 
180 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off)); 181 ASSERT(mementry); 182 mementry->kte_refcnt++; 183 } 184 ASSERT(mementry->kte_refcnt >= 0); 185 mutex_exit(&softstate->asoft_instmutex); 186 kmem_free(ctxp, sizeof (struct agpgart_ctx)); 187 } 188 189 /*ARGSUSED*/ 190 static int 191 agp_devmap_map(devmap_cookie_t handle, dev_t dev, 192 uint_t flags, offset_t offset, size_t len, void **new_devprivate) 193 { 194 agpgart_softstate_t *softstate; 195 int instance; 196 struct keytable_ent *mementry; 197 agpgart_ctx_t *newctxp; 198 199 ASSERT(handle); 200 instance = AGP_DEV2INST(dev); 201 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 202 if (softstate == NULL) { 203 AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err")); 204 return (ENXIO); 205 } 206 207 ASSERT(softstate); 208 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 209 ASSERT(len); 210 ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len)); 211 212 mementry = 213 agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset)); 214 ASSERT(mementry); 215 mementry->kte_refcnt++; 216 ASSERT(mementry->kte_refcnt >= 0); 217 newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 218 newctxp->actx_off = offset; 219 newctxp->actx_sc = softstate; 220 *new_devprivate = newctxp; 221 222 return (0); 223 } 224 225 /*ARGSUSED*/ 226 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate, 227 devmap_cookie_t new_handle, void **new_devprivate) 228 { 229 struct keytable_ent *mementry; 230 agpgart_ctx_t *newctxp, *ctxp; 231 agpgart_softstate_t *softstate; 232 233 ASSERT(devprivate); 234 ASSERT(handle && new_handle); 235 236 ctxp = (agpgart_ctx_t *)devprivate; 237 ASSERT(AGP_ALIGNED(ctxp->actx_off)); 238 239 newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP); 240 newctxp->actx_off = ctxp->actx_off; 241 newctxp->actx_sc = ctxp->actx_sc; 242 softstate = (agpgart_softstate_t *)newctxp->actx_sc; 243 244 mutex_enter(&softstate->asoft_instmutex); 245 mementry = 
agp_find_bound_keyent(softstate, 246 AGP_BYTES2PAGES(newctxp->actx_off)); 247 mementry->kte_refcnt++; 248 ASSERT(mementry->kte_refcnt >= 0); 249 mutex_exit(&softstate->asoft_instmutex); 250 *new_devprivate = newctxp; 251 252 return (0); 253 } 254 255 struct devmap_callback_ctl agp_devmap_cb = { 256 DEVMAP_OPS_REV, /* rev */ 257 agp_devmap_map, /* map */ 258 NULL, /* access */ 259 agp_devmap_dup, /* dup */ 260 agp_devmap_unmap, /* unmap */ 261 }; 262 263 /* 264 * agp_master_regis_byname() 265 * 266 * Description: 267 * Open the AGP master device node by device path name and 268 * register the device handle for later operations. 269 * We check all possible driver instance from 0 270 * to MAX_INSTNUM because the master device could be 271 * at any instance number. Only one AGP master is supported. 272 * 273 * Arguments: 274 * master_hdlp AGP master device LDI handle pointer 275 * agpgart_l AGPGART driver LDI identifier 276 * 277 * Returns: 278 * -1 failed 279 * 0 success 280 */ 281 static int 282 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li) 283 { 284 int i; 285 char buf[MAXPATHLEN]; 286 287 ASSERT(master_hdlp); 288 ASSERT(agpgart_li); 289 290 /* 291 * Search all possible instance numbers for the agp master device. 292 * Only one master device is supported now, so the search ends 293 * when one master device is found. 294 */ 295 for (i = 0; i < MAX_INSTNUM; i++) { 296 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i); 297 if ((ldi_open_by_name(buf, 0, kcred, 298 master_hdlp, agpgart_li))) 299 continue; 300 AGPDB_PRINT1((CE_NOTE, 301 "master device found: instance number=%d", i)); 302 break; 303 304 } 305 306 /* AGP master device not found */ 307 if (i == MAX_INSTNUM) 308 return (-1); 309 310 return (0); 311 } 312 313 /* 314 * agp_target_regis_byname() 315 * 316 * Description: 317 * This function opens agp bridge device node by 318 * device path name and registers the device handle 319 * for later operations. 
 * 	We check driver instances from 0 to MAX_INSTNUM
 * 	because the target device could be at any instance
 * 	number. Only one agp target is supported.
 *
 *
 * Arguments:
 * 	target_hdlp		AGP target device LDI handle pointer
 * 	agpgart_li		AGPGART driver LDI identifier
 *
 * Returns:
 * 	-1			failed
 * 	0			success
 */
static int
agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
{
	int	i;
	char	buf[MAXPATHLEN];

	ASSERT(target_hdlp);
	ASSERT(agpgart_li);

	for (i = 0; i < MAX_INSTNUM; i++) {
		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
		if ((ldi_open_by_name(buf, 0, kcred,
		    target_hdlp, agpgart_li)))
			continue;

		AGPDB_PRINT1((CE_NOTE,
		    "bridge device found: instance number=%d", i));
		break;

	}

	/* AGP bridge device not found */
	if (i == MAX_INSTNUM) {
		AGPDB_PRINT2((CE_WARN, "bridge device not found"));
		return (-1);
	}

	return (0);
}

/*
 * amd64_gart_regis_byname()
 *
 * Description:
 * 	Open all amd64 gart device nodes by device path name and
 * 	register the device handles for later operations. Each cpu
 * 	has its own amd64 gart device.
 *
 * Arguments:
 * 	cpu_garts		cpu garts device list header
 * 	agpgart_li		AGPGART driver LDI identifier
 *
 * Returns:
 * 	-1			failed
 * 	0			success
 */
static int
amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
{
	amd64_gart_dev_list_t	*gart_list;
	int			i;
	char			buf[MAXPATHLEN];
	ldi_handle_t		gart_hdl;
	int			ret;

	ASSERT(cpu_garts);
	ASSERT(agpgart_li);

	/*
	 * Search all possible instance numbers for the gart devices.
	 * There can be multiple on-cpu gart devices for Opteron server.
	 */
	for (i = 0; i < MAX_INSTNUM; i++) {
		(void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
		ret = ldi_open_by_name(buf, 0, kcred,
		    &gart_hdl, agpgart_li);

		if (ret == ENODEV)
			continue;
		else if (ret != 0) { /* There was an error opening the device */
			amd64_gart_unregister(cpu_garts);
			return (ret);
		}

		AGPDB_PRINT1((CE_NOTE,
		    "amd64 gart device found: instance number=%d", i));

		gart_list = (amd64_gart_dev_list_t *)
		    kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);

		/* Add new item to the head of the gart device list */
		gart_list->gart_devhdl = gart_hdl;
		gart_list->next = cpu_garts->gart_dev_list_head;
		cpu_garts->gart_dev_list_head = gart_list;
		cpu_garts->gart_device_num++;
	}

	if (cpu_garts->gart_device_num == 0)
		return (ENODEV);
	return (0);
}

/*
 * Unregister agp master device handle
 */
static void
agp_master_unregister(ldi_handle_t *master_hdlp)
{
	ASSERT(master_hdlp);

	/*
	 * NOTE(review): this guard tests the pointer, which the ASSERT above
	 * already guarantees; it likely intended to test *master_hdlp --
	 * confirm against callers before changing.
	 */
	if (master_hdlp) {
		(void) ldi_close(*master_hdlp, 0, kcred);
		*master_hdlp = NULL;
	}
}

/*
 * Unregister agp bridge device handle
 */
static void
agp_target_unregister(ldi_handle_t *target_hdlp)
{
	if (target_hdlp) {
		(void) ldi_close(*target_hdlp, 0, kcred);
		*target_hdlp = NULL;
	}
}

/*
 * Unregister all amd64 gart device handles
 */
static void
amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
{
	amd64_gart_dev_list_t	*gart_list;
	amd64_gart_dev_list_t	*next;

	ASSERT(cpu_garts);

	for (gart_list = cpu_garts->gart_dev_list_head;
	    gart_list; gart_list = next) {

		ASSERT(gart_list->gart_devhdl);
		(void) ldi_close(gart_list->gart_devhdl, 0, kcred);
		next = gart_list->next;
		/* Free allocated memory */
		kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
	}
	cpu_garts->gart_dev_list_head = NULL;
	cpu_garts->gart_device_num = 0;
}

/*
 * lyr_detect_master_type()
 *
 * Description:
 * 	This function gets the agp master type by querying the agp master
 * 	device.
 *
 * Arguments:
 * 	master_hdlp		agp master device ldi handle pointer
 *
 * Returns:
 * 	-1			unsupported device
 * 	DEVICE_IS_I810		i810 series
 * 	DEVICE_IS_I830		i830 series
 * 	DEVICE_IS_AGP		true agp master
 */
static int
lyr_detect_master_type(ldi_handle_t *master_hdlp)
{
	int vtype;
	int err;

	ASSERT(master_hdlp);

	/* ldi_ioctl(agpmaster) */
	err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
	    (intptr_t)&vtype, FKIOCTL, kcred, 0);
	if (err) /* Unsupported graphics device */
		return (-1);
	return (vtype);
}

/*
 * lyr_detect_target_type()
 *
 * Description:
 * 	This function gets the host bridge chipset type by querying the agp
 * 	target device.
 *
 * Arguments:
 * 	target_hdlp		agp target device LDI handle pointer
 *
 * Returns:
 * 	CHIP_IS_INTEL		Intel agp chipsets
 * 	CHIP_IS_AMD		AMD agp chipset
 * 	-1			unsupported chipset
 */
static int
lyr_detect_target_type(ldi_handle_t *target_hdlp)
{
	int btype;
	int err;

	ASSERT(target_hdlp);

	err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
	    FKIOCTL, kcred, 0);
	if (err) /* Unsupported bridge device */
		return (-1);
	return (btype);
}

/*
 * lyr_init()
 *
 * Description:
 * 	This function detects the graphics system architecture and
 * 	registers all relative device handles in a global structure
 * 	"agp_regdev". Then it stores the system arc type in driver
 * 	soft state.
 *
 * Arguments:
 *	agp_regdev	AGP devices registration struct pointer
 *	agpgart_li	AGPGART driver LDI identifier
 *
 * Returns:
 * 	0	System arc supported and agp devices registration succeeded.
 * 	-1	System arc not supported or device registration failed.
 */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/* No amd64 gart devices */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is an intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810:	/* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830:	/* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP:	/* AGP graphics */
			break;
		default:		/* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL:	/* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD:	/* AMD chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default:		/* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection
	 */
	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE, "lyr_init: no AGP master in amd64"));
		goto err1;
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge"));
		goto err2;
	}

	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	/* AMD64 CPU gart registered ? (ret == 0 only on the amd64 path) */
	if (ret == 0) {
		amd64_gart_unregister(garts_dev);
	}
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}

/*
 * lyr_end()
 *
 * Description:
 * 	Close every device handle that lyr_init() registered for the
 * 	detected architecture; no-op fields are simply skipped.
 */
void
lyr_end(agp_registered_dev_t *agp_regdev)
{
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
	case ARC_IGD830:
	case ARC_INTELAGP:
		agp_master_unregister(&agp_regdev->agprd_masterhdl);
		agp_target_unregister(&agp_regdev->agprd_targethdl);

		return;
	case ARC_AMD64AGP:
		agp_master_unregister(&agp_regdev->agprd_masterhdl);
		agp_target_unregister(&agp_regdev->agprd_targethdl);
		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);

		return;
	default:
		ASSERT(0);
		return;
	}
}

/*
 * lyr_get_info()
 *
 * Description:
 * 	Collect device/aperture information from the master and target
 * 	devices (via ioctls) into the kernel info struct, per architecture.
 *
 * Arguments:
 * 	info		out: kernel AGP info to fill
 * 	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 * 	0	success
 * 	-1	ioctl failure, unknown arc, or a mis-programmed aperture
 */
int
lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	igd_info_t value1;
	i_agp_info_t value2;
	size_t prealloc_size;
	int err;

	ASSERT(info);
	ASSERT(agp_regdev);

	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = (uint32_t)value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_presize = prealloc_size;

		break;

	case ARC_IGD830:
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
		    FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value1.igd_devid;
		info->agpki_aperbase = value1.igd_aperbase;
		info->agpki_apersize = (uint32_t)value1.igd_apersize;

		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
		if (err)
			return (-1);

		/*
		 * Assume all units are kilobytes unless explicitly
		 * stated below:
		 * preallocated GTT memory = preallocated memory - GTT size
		 * 	- scratch page size
		 *
		 * scratch page size = 4
		 * GTT size (KB) = aperture size (MB)
		 * this algorithm came from Xorg source code
		 */
		if (prealloc_size > (info->agpki_apersize + 4))
			prealloc_size =
			    prealloc_size - info->agpki_apersize - 4;
		else {
			AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
			    "pre-allocated memory too small, setting to zero"));
			prealloc_size = 0;
		}
		info->agpki_presize = prealloc_size;
		/*
		 * NOTE(review): prealloc_size is size_t; %ld assumes an LP64
		 * kernel -- confirm (%lu/%zu would be more precise).
		 */
		AGPDB_PRINT2((CE_NOTE,
		    "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
		    prealloc_size, info->agpki_apersize));
		break;
	case ARC_INTELAGP:
	case ARC_AMD64AGP:
		/* AGP devices */
		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_mdevid = value2.iagp_devid;
		info->agpki_mver = value2.iagp_ver;
		info->agpki_mstatus = value2.iagp_mode;
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
		    (intptr_t)&value2, FKIOCTL, kcred, 0);
		if (err)
			return (-1);
		info->agpki_tdevid = value2.iagp_devid;
		info->agpki_tver = value2.iagp_ver;
		info->agpki_tstatus = value2.iagp_mode;
		info->agpki_aperbase = value2.iagp_aperbase;
		info->agpki_apersize = (uint32_t)value2.iagp_apersize;
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: function doesn't work for unknown arc"));
		return (-1);
	}
	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
	    (info->agpki_apersize == 0) ||
	    (info->agpki_aperbase == 0)) {
		AGPDB_PRINT2((CE_WARN,
		    "lyr_get_info: aperture is not programmed correctly!"));
		return (-1);
	}

	return (0);
}

/*
 *
lyr_i8xx_add_to_gtt() 809 * 810 * Description: 811 * This function sets up the integrated video device gtt table 812 * via an ioclt to the AGP master driver. 813 * 814 * Arguments: 815 * pg_offset The start entry to be setup 816 * keyent Keytable entity pointer 817 * agp_regdev AGP devices registration struct pointer 818 * 819 * Returns: 820 * 0 success 821 * -1 invalid operations 822 */ 823 int 824 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent, 825 agp_registered_dev_t *agp_regdev) 826 { 827 int err = 0; 828 int rval; 829 ldi_handle_t hdl; 830 igd_gtt_seg_t gttseg; 831 uint32_t *addrp, i; 832 uint32_t npages; 833 834 ASSERT(keyent); 835 ASSERT(agp_regdev); 836 gttseg.igs_pgstart = pg_offset; 837 npages = keyent->kte_pages; 838 gttseg.igs_npage = npages; 839 gttseg.igs_type = keyent->kte_type; 840 gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc 841 (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP); 842 843 addrp = gttseg.igs_phyaddr; 844 for (i = 0; i < npages; i++, addrp++) { 845 *addrp = 846 (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT); 847 } 848 849 hdl = agp_regdev->agprd_masterhdl; 850 if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)>tseg, FKIOCTL, 851 kcred, &rval)) { 852 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error")); 853 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x", 854 gttseg.igs_pgstart)); 855 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x", 856 gttseg.igs_npage)); 857 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x", 858 gttseg.igs_type)); 859 err = -1; 860 } 861 kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage); 862 return (err); 863 } 864 865 /* 866 * lyr_i8xx_remove_from_gtt() 867 * 868 * Description: 869 * This function clears the integrated video device gtt table via 870 * an ioctl to the agp master device. 
 *
 * Arguments:
 * 	pg_offset	The starting entry to be cleared
 * 	npage		The number of entries to be cleared
 *	agp_regdev	AGP devices struct pointer
 *
 * Returns:
 * 	0		success
 * 	-1		invalid operations
 */
int
lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
    agp_registered_dev_t *agp_regdev)
{
	int rval;
	ldi_handle_t hdl;
	igd_gtt_seg_t gttseg;

	gttseg.igs_pgstart =  pg_offset;
	gttseg.igs_npage = npage;

	hdl = agp_regdev->agprd_masterhdl;
	if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
	    kcred, &rval))
		return (-1);

	return (0);
}

/*
 * lyr_set_gart_addr()
 *
 * Description:
 *	This function puts the gart table physical address in the
 *	gart base register.
 *	Please refer to gart and gtt table base register format for
 *	gart base register format in agpdefs.h.
 *
 * Arguments:
 * 	phy_base	The base physical address of gart table
 *	agp_regdev	AGP devices registration struct pointer
 *
 * Returns:
 * 	0		success
 * 	-1		failed
 *
 */

int
lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			err = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD810:
	{
		uint32_t base;

		ASSERT((phy_base & I810_POINTER_MASK) == 0);
		base = (uint32_t)phy_base;

		hdl = agp_regdev->agprd_masterhdl;
		err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
		    (intptr_t)&base, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		uint32_t addr;
		addr = (uint32_t)phy_base;

		ASSERT((phy_base & GTT_POINTER_MASK) == 0);
		hdl = agp_regdev->agprd_targethdl;
		err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
		    (intptr_t)&addr, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64AGP:
	{
		uint32_t addr;

		ASSERT((phy_base & AMD64_POINTER_MASK) == 0);
		/* AMD64 gart base register takes the address shifted down */
		addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
		    & AMD64_GARTBASE_MASK);

		/* Every on-cpu gart must be programmed with the same base */
		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
			    (intptr_t)&addr, FKIOCTL, kcred, 0)) {
				err = -1;
				break;
			}
		}
		break;
	}
	default:
		err = -1;
	}

	if (err)
		return (-1);

	return (0);
}

/*
 * lyr_set_agp_cmd()
 *
 * Description:
 * 	Write the AGP command register on both the target (bridge) and the
 * 	master device, in that order.
 *
 * Returns:
 * 	0	success
 * 	-1	either ioctl failed
 */
int
lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
{
	ldi_handle_t hdl;
	uint32_t command;

	ASSERT(agp_regdev);
	command = cmd;
	hdl = agp_regdev->agprd_targethdl;
	if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
	    (intptr_t)&command, FKIOCTL, kcred, 0))
		return (-1);
	hdl = agp_regdev->agprd_masterhdl;
	if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
	    (intptr_t)&command, FKIOCTL, kcred, 0))
		return (-1);

	return (0);
}

/*
 * lyr_config_devices()
 *
 * Description:
 * 	Per-architecture device configuration: no-op for integrated
 * 	graphics, a target ioctl for Intel AGP, and one ioctl per on-cpu
 * 	gart device for AMD64.
 *
 * Returns:
 * 	0	success
 * 	-1	any configure ioctl failed or unknown arc
 */
int
lyr_config_devices(agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			rc = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD830:
	case ARC_IGD810:
		break;
	case ARC_INTELAGP:
	{
		hdl = agp_regdev->agprd_targethdl;
		rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
		    0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64AGP:
	{
		/*
		 * BIOS always shadow registers such like Aperture Base
		 * register, Aperture Size Register from the AGP bridge
		 * to the AMD64 CPU host bridge. If future BIOSes are broken
		 * in this regard, we may need to shadow these registers
		 * in driver.
		 */

		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list;
		    gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_CONFIGURE,
			    0, FKIOCTL, kcred, 0)) {
				rc = -1;
				break;
			}
		}
		break;
	}
	default:
		rc = -1;
	}

	if (rc)
		return (-1);

	return (0);
}

/*
 * lyr_unconfig_devices()
 *
 * Description:
 * 	Inverse of lyr_config_devices(): per-architecture unconfigure
 * 	ioctls (master for IGD, target for Intel AGP, every gart device
 * 	for AMD64).
 *
 * Returns:
 * 	0	success
 * 	-1	any unconfigure ioctl failed or unknown arc
 */
int
lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
{
	amd64_gart_dev_list_t	*gart_list;
	ldi_handle_t		hdl;
	int			rc = 0;

	ASSERT(agp_regdev);
	switch (agp_regdev->agprd_arctype) {
	case ARC_IGD830:
	case ARC_IGD810:
	{
		hdl = agp_regdev->agprd_masterhdl;
		rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_INTELAGP:
	{
		hdl = agp_regdev->agprd_targethdl;
		rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
		    0, FKIOCTL, kcred, 0);
		break;
	}
	case ARC_AMD64AGP:
	{
		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
		    gart_list; gart_list = gart_list->next) {
			hdl = gart_list->gart_devhdl;
			if (ldi_ioctl(hdl, AMD64_UNCONFIG,
			    0, FKIOCTL, kcred, 0)) {
				rc = -1;
				break;
			}
		}
		break;
	}
	default:
		rc = -1;
	}

	if (rc)
		return (-1);

	return (0);
}

/*
 * lyr_flush_gart_cache()
 *
 * Description:
 * 	This function flushes the GART translation look-aside buffer. All
 * 	GART translation caches will be flushed after this operation.
1104 * 1105 * Arguments: 1106 * agp_regdev AGP devices struct pointer 1107 */ 1108 void 1109 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev) 1110 { 1111 amd64_gart_dev_list_t *gart_list; 1112 ldi_handle_t hdl; 1113 1114 ASSERT(agp_regdev); 1115 if (agp_regdev->agprd_arctype == ARC_AMD64AGP) { 1116 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head; 1117 gart_list; gart_list = gart_list->next) { 1118 hdl = gart_list->gart_devhdl; 1119 (void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB, 1120 0, FKIOCTL, kcred, 0); 1121 } 1122 } else if (agp_regdev->agprd_arctype == ARC_INTELAGP) { 1123 hdl = agp_regdev->agprd_targethdl; 1124 (void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0, 1125 FKIOCTL, kcred, 0); 1126 } 1127 } 1128 1129 /* 1130 * get_max_pages() 1131 * 1132 * Description: 1133 * This function compute the total pages allowed for agp aperture 1134 * based on the ammount of physical pages. 1135 * The algorithm is: compare the aperture size with 1/4 of total 1136 * physical pages, and use the smaller one to for the max available 1137 * pages. 1138 * 1139 * Arguments: 1140 * aper_size system agp aperture size (in MB) 1141 * 1142 * Returns: 1143 * The max possible number of agp memory pages available to users 1144 */ 1145 static uint32_t 1146 get_max_pages(uint32_t aper_size) 1147 { 1148 uint32_t i, j; 1149 1150 ASSERT(aper_size <= MAXAPERMEGAS); 1151 1152 i = AGP_MB2PAGES(aper_size); 1153 j = (physmem >> 2); 1154 1155 return ((i < j) ? i : j); 1156 } 1157 1158 /* 1159 * agp_fill_empty_keyent() 1160 * 1161 * Description: 1162 * This function finds a empty key table slot and 1163 * fills it with a new entity. 
1164 * 1165 * Arguments: 1166 * softsate driver soft state pointer 1167 * entryp new entity data pointer 1168 * 1169 * Returns: 1170 * NULL no key table slot available 1171 * entryp the new entity slot pointer 1172 */ 1173 static keytable_ent_t * 1174 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp) 1175 { 1176 int key; 1177 keytable_ent_t *newentryp; 1178 1179 ASSERT(softstate); 1180 ASSERT(entryp); 1181 ASSERT(entryp->kte_memhdl); 1182 ASSERT(entryp->kte_pfnarray); 1183 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 1184 1185 for (key = 0; key < AGP_MAXKEYS; key++) { 1186 newentryp = &softstate->asoft_table[key]; 1187 if (newentryp->kte_memhdl == NULL) { 1188 break; 1189 } 1190 } 1191 1192 if (key >= AGP_MAXKEYS) { 1193 AGPDB_PRINT2((CE_WARN, 1194 "agp_fill_empty_keyent: key table exhausted")); 1195 return (NULL); 1196 } 1197 1198 ASSERT(newentryp->kte_pfnarray == NULL); 1199 bcopy(entryp, newentryp, sizeof (keytable_ent_t)); 1200 newentryp->kte_key = key; 1201 1202 return (newentryp); 1203 } 1204 1205 /* 1206 * agp_find_bound_keyent() 1207 * 1208 * Description: 1209 * This function finds the key table entity by agp aperture page offset. 1210 * Every keytable entity will have an agp aperture range after the binding 1211 * operation. 
1212 * 1213 * Arguments: 1214 * softsate driver soft state pointer 1215 * pg_offset agp aperture page offset 1216 * 1217 * Returns: 1218 * NULL no such keytable entity 1219 * pointer key table entity pointer found 1220 */ 1221 static keytable_ent_t * 1222 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset) 1223 { 1224 int keycount; 1225 keytable_ent_t *entryp; 1226 1227 ASSERT(softstate); 1228 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 1229 1230 for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) { 1231 entryp = &softstate->asoft_table[keycount]; 1232 if (entryp->kte_bound == 0) { 1233 continue; 1234 } 1235 1236 if (pg_offset < entryp->kte_pgoff) 1237 continue; 1238 if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages)) 1239 continue; 1240 1241 ASSERT(entryp->kte_memhdl); 1242 ASSERT(entryp->kte_pfnarray); 1243 1244 return (entryp); 1245 } 1246 1247 return (NULL); 1248 } 1249 1250 /* 1251 * agp_check_off() 1252 * 1253 * Description: 1254 * This function checks whether an AGP aperture range to be bound 1255 * overlaps with AGP offset already bound. 
1256 * 1257 * Arguments: 1258 * entryp key table start entry pointer 1259 * pg_start AGP range start page offset 1260 * pg_num pages number to be bound 1261 * 1262 * Returns: 1263 * 0 Does not overlap 1264 * -1 Overlaps 1265 */ 1266 1267 static int 1268 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num) 1269 { 1270 int key; 1271 uint64_t pg_end; 1272 uint64_t kpg_end; 1273 1274 ASSERT(entryp); 1275 1276 pg_end = pg_start + pg_num; 1277 for (key = 0; key < AGP_MAXKEYS; key++) { 1278 if (!entryp[key].kte_bound) 1279 continue; 1280 1281 kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages; 1282 if (!((pg_end <= entryp[key].kte_pgoff) || 1283 (pg_start >= kpg_end))) 1284 break; 1285 } 1286 1287 if (key == AGP_MAXKEYS) 1288 return (0); 1289 else 1290 return (-1); 1291 } 1292 1293 static int 1294 is_controlling_proc(agpgart_softstate_t *st) 1295 { 1296 ASSERT(st); 1297 1298 if (!st->asoft_acquired) { 1299 AGPDB_PRINT2((CE_WARN, 1300 "ioctl_agpgart_setup: gart not acquired")); 1301 return (-1); 1302 } 1303 if (st->asoft_curpid != ddi_get_pid()) { 1304 AGPDB_PRINT2((CE_WARN, 1305 "ioctl_agpgart_release: not controlling process")); 1306 return (-1); 1307 } 1308 1309 return (0); 1310 } 1311 1312 static void release_control(agpgart_softstate_t *st) 1313 { 1314 st->asoft_curpid = 0; 1315 st->asoft_acquired = 0; 1316 } 1317 1318 static void acquire_control(agpgart_softstate_t *st) 1319 { 1320 st->asoft_curpid = ddi_get_pid(); 1321 st->asoft_acquired = 1; 1322 } 1323 1324 /* 1325 * agp_remove_from_gart() 1326 * 1327 * Description: 1328 * This function fills the gart table entries by a given page 1329 * frame number array and setup the agp aperture page to physical 1330 * memory page translation. 
1331 * Arguments: 1332 * pg_offset Starting aperture page to be bound 1333 * entries the number of pages to be bound 1334 * acc_hdl GART table dma memory acc handle 1335 * tablep GART table kernel virtual address 1336 */ 1337 static void 1338 agp_remove_from_gart( 1339 uint32_t pg_offset, 1340 uint32_t entries, 1341 ddi_dma_handle_t dma_hdl, 1342 uint32_t *tablep) 1343 { 1344 uint32_t items = 0; 1345 uint32_t *entryp; 1346 1347 entryp = tablep + pg_offset; 1348 while (items < entries) { 1349 *(entryp + items) = 0; 1350 items++; 1351 } 1352 (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t), 1353 entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV); 1354 } 1355 1356 /* 1357 * agp_unbind_key() 1358 * 1359 * Description: 1360 * This function unbinds AGP memory from the gart table. It will clear 1361 * all the gart entries related to this agp memory. 1362 * 1363 * Arguments: 1364 * softstate driver soft state pointer 1365 * entryp key table entity pointer 1366 * 1367 * Returns: 1368 * EINVAL invalid key table entity pointer 1369 * 0 success 1370 * 1371 */ 1372 static int 1373 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp) 1374 { 1375 int retval = 0; 1376 1377 ASSERT(entryp); 1378 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 1379 1380 if (!entryp->kte_bound) { 1381 AGPDB_PRINT2((CE_WARN, 1382 "agp_unbind_key: key = 0x%x, not bound", 1383 entryp->kte_key)); 1384 return (EINVAL); 1385 } 1386 if (entryp->kte_refcnt) { 1387 AGPDB_PRINT2((CE_WARN, 1388 "agp_unbind_key: memory is exported to users")); 1389 return (EINVAL); 1390 } 1391 1392 ASSERT((entryp->kte_pgoff + entryp->kte_pages) <= 1393 AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)); 1394 ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN)); 1395 1396 switch (softstate->asoft_devreg.agprd_arctype) { 1397 case ARC_IGD810: 1398 case ARC_IGD830: 1399 retval = lyr_i8xx_remove_from_gtt( 1400 entryp->kte_pgoff, entryp->kte_pages, 1401 
&softstate->asoft_devreg); 1402 if (retval) { 1403 AGPDB_PRINT2((CE_WARN, 1404 "agp_unbind_key: Key = 0x%x, clear table error", 1405 entryp->kte_key)); 1406 return (EIO); 1407 } 1408 break; 1409 case ARC_INTELAGP: 1410 case ARC_AMD64AGP: 1411 agp_remove_from_gart(entryp->kte_pgoff, 1412 entryp->kte_pages, 1413 softstate->gart_dma_handle, 1414 (uint32_t *)softstate->gart_vbase); 1415 /* Flush GTLB table */ 1416 lyr_flush_gart_cache(&softstate->asoft_devreg); 1417 1418 break; 1419 } 1420 1421 entryp->kte_bound = 0; 1422 1423 return (0); 1424 } 1425 1426 /* 1427 * agp_dealloc_kmem() 1428 * 1429 * Description: 1430 * This function deallocates dma memory resources for userland 1431 * applications. 1432 * 1433 * Arguments: 1434 * entryp keytable entity pointer 1435 */ 1436 static void 1437 agp_dealloc_kmem(keytable_ent_t *entryp) 1438 { 1439 kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages); 1440 entryp->kte_pfnarray = NULL; 1441 1442 (void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle); 1443 KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0; 1444 ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl); 1445 KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL; 1446 KMEMP(entryp->kte_memhdl)->kmem_reallen = 0; 1447 KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL; 1448 1449 ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle)); 1450 KMEMP(entryp->kte_memhdl)->kmem_handle = NULL; 1451 1452 kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t)); 1453 entryp->kte_memhdl = NULL; 1454 } 1455 1456 /* 1457 * agp_dealloc_mem() 1458 * 1459 * Description: 1460 * This function deallocates physical memory resources allocated for 1461 * userland applications. 
1462 * 1463 * Arguments: 1464 * st driver soft state pointer 1465 * entryp key table entity pointer 1466 * 1467 * Returns: 1468 * -1 not a valid memory type or the memory is mapped by 1469 * user area applications 1470 * 0 success 1471 */ 1472 static int 1473 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t *entryp) 1474 { 1475 1476 ASSERT(entryp); 1477 ASSERT(st); 1478 ASSERT(entryp->kte_memhdl); 1479 ASSERT(mutex_owned(&st->asoft_instmutex)); 1480 1481 /* auto unbind here */ 1482 if (entryp->kte_bound && !entryp->kte_refcnt) { 1483 AGPDB_PRINT2((CE_WARN, 1484 "agp_dealloc_mem: key=0x%x, auto unbind", 1485 entryp->kte_key)); 1486 1487 /* 1488 * agp_dealloc_mem may be called indirectly by agp_detach. 1489 * In the agp_detach function, agpgart_close is already 1490 * called which will free the gart table. agp_unbind_key 1491 * will panic if no valid gart table exists. So test if 1492 * gart table exsits here. 1493 */ 1494 if (st->asoft_opened) 1495 (void) agp_unbind_key(st, entryp); 1496 } 1497 if (entryp->kte_refcnt) { 1498 AGPDB_PRINT2((CE_WARN, 1499 "agp_dealloc_mem: memory is exported to users")); 1500 return (-1); 1501 } 1502 1503 switch (entryp->kte_type) { 1504 case AGP_NORMAL: 1505 case AGP_PHYSICAL: 1506 agp_dealloc_kmem(entryp); 1507 break; 1508 default: 1509 return (-1); 1510 } 1511 1512 return (0); 1513 } 1514 1515 /* 1516 * agp_del_allkeys() 1517 * 1518 * Description: 1519 * This function calls agp_dealloc_mem to release all the agp memory 1520 * resource allocated. 1521 * 1522 * Arguments: 1523 * softsate driver soft state pointer 1524 * Returns: 1525 * -1 can not free all agp memory 1526 * 0 success 1527 * 1528 */ 1529 static int 1530 agp_del_allkeys(agpgart_softstate_t *softstate) 1531 { 1532 int key; 1533 int ret = 0; 1534 1535 ASSERT(softstate); 1536 for (key = 0; key < AGP_MAXKEYS; key++) { 1537 if (softstate->asoft_table[key].kte_memhdl != NULL) { 1538 /* 1539 * Check if we can free agp memory now. 
1540 * If agp memory is exported to user 1541 * applications, agp_dealloc_mem will fail. 1542 */ 1543 if (agp_dealloc_mem(softstate, 1544 &softstate->asoft_table[key])) 1545 ret = -1; 1546 } 1547 } 1548 1549 return (ret); 1550 } 1551 1552 /* 1553 * pfn2gartentry() 1554 * 1555 * Description: 1556 * This function converts a physical address to GART entry. 1557 * For AMD64, hardware only support addresses below 40bits, 1558 * about 1024G physical address, so the largest pfn 1559 * number is below 28 bits. Please refer to GART and GTT entry 1560 * format table in agpdefs.h for entry format. Intel IGD only 1561 * only supports GTT entry below 1G. Intel AGP only supports 1562 * GART entry below 4G. 1563 * 1564 * Arguments: 1565 * arc_type system agp arc type 1566 * pfn page frame number 1567 * itemv the entry item to be returned 1568 * Returns: 1569 * -1 not a invalid page frame 1570 * 0 conversion success 1571 */ 1572 static int 1573 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv) 1574 { 1575 uint64_t paddr; 1576 1577 paddr = (uint64_t)pfn << AGP_PAGE_SHIFT; 1578 AGPDB_PRINT1((CE_NOTE, "checking pfn number %lu for type %d", 1579 pfn, arc_type)); 1580 1581 switch (arc_type) { 1582 case ARC_INTELAGP: 1583 { 1584 /* Only support 32-bit hardware address */ 1585 if ((paddr & AGP_INTEL_POINTER_MASK) != 0) { 1586 AGPDB_PRINT2((CE_WARN, 1587 "INTEL AGP Hardware only support 32 bits")); 1588 return (-1); 1589 } 1590 *itemv = (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID; 1591 1592 break; 1593 } 1594 case ARC_AMD64AGP: 1595 { 1596 uint32_t value1, value2; 1597 /* Physaddr should not exceed 40-bit */ 1598 if ((paddr & AMD64_POINTER_MASK) != 0) { 1599 AGPDB_PRINT2((CE_WARN, 1600 "AMD64 GART hardware only supoort 40 bits")); 1601 return (-1); 1602 } 1603 value1 = (uint32_t)pfn >> 20; 1604 value1 <<= 4; 1605 value2 = (uint32_t)pfn << 12; 1606 1607 *itemv = value1 | value2 | AMD64_ENTRY_VALID; 1608 break; 1609 } 1610 case ARC_IGD810: 1611 if ((paddr & I810_POINTER_MASK) 
!= 0) { 1612 AGPDB_PRINT2((CE_WARN, 1613 "Intel i810 only support 30 bits")); 1614 return (-1); 1615 } 1616 break; 1617 1618 case ARC_IGD830: 1619 if ((paddr & GTT_POINTER_MASK) != 0) { 1620 AGPDB_PRINT2((CE_WARN, 1621 "Intel IGD only support 32 bits")); 1622 return (-1); 1623 } 1624 break; 1625 default: 1626 AGPDB_PRINT2((CE_WARN, 1627 "pfn2gartentry: arc type = %d, not support", arc_type)); 1628 return (-1); 1629 } 1630 return (0); 1631 } 1632 1633 /* 1634 * Check allocated physical pages validity, only called in DEBUG 1635 * mode. 1636 */ 1637 static int 1638 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items) 1639 { 1640 int count; 1641 uint32_t ret; 1642 1643 for (count = 0; count < items; count++) { 1644 if (pfn2gartentry(arc_type, pfnarray[count], &ret)) 1645 break; 1646 } 1647 if (count < items) 1648 return (-1); 1649 else 1650 return (0); 1651 } 1652 1653 /* 1654 * kmem_getpfns() 1655 * 1656 * Description: 1657 * This function gets page frame numbers from dma handle. 
1658 * 1659 * Arguments: 1660 * dma_handle dma hanle allocated by ddi_dma_alloc_handle 1661 * dma_cookip dma cookie pointer 1662 * cookies_num cookies number 1663 * pfnarray array to store page frames 1664 * 1665 * Returns: 1666 * 0 success 1667 */ 1668 static int 1669 kmem_getpfns( 1670 ddi_dma_handle_t dma_handle, 1671 ddi_dma_cookie_t *dma_cookiep, 1672 int cookies_num, 1673 pfn_t *pfnarray) 1674 { 1675 int num_cookies; 1676 int index = 0; 1677 1678 num_cookies = cookies_num; 1679 1680 while (num_cookies > 0) { 1681 uint64_t ck_startaddr, ck_length, ck_end; 1682 ck_startaddr = dma_cookiep->dmac_address; 1683 ck_length = dma_cookiep->dmac_size; 1684 1685 ck_end = ck_startaddr + ck_length; 1686 while (ck_startaddr < ck_end) { 1687 pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT; 1688 ck_startaddr += AGP_PAGE_SIZE; 1689 index++; 1690 } 1691 1692 num_cookies--; 1693 if (num_cookies > 0) { 1694 ddi_dma_nextcookie(dma_handle, dma_cookiep); 1695 } 1696 } 1697 1698 return (0); 1699 } 1700 1701 static int 1702 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info) 1703 { 1704 switch (softstate->asoft_devreg.agprd_arctype) { 1705 case ARC_IGD810: 1706 case ARC_IGD830: 1707 info->agpi_version.agpv_major = 0; 1708 info->agpi_version.agpv_minor = 0; 1709 info->agpi_devid = softstate->asoft_info.agpki_mdevid; 1710 info->agpi_mode = 0; 1711 break; 1712 case ARC_INTELAGP: 1713 case ARC_AMD64AGP: 1714 info->agpi_version = softstate->asoft_info.agpki_tver; 1715 info->agpi_devid = softstate->asoft_info.agpki_tdevid; 1716 info->agpi_mode = softstate->asoft_info.agpki_tstatus; 1717 break; 1718 default: 1719 AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC")); 1720 return (-1); 1721 } 1722 /* 1723 * 64bit->32bit conversion possible 1724 */ 1725 info->agpi_aperbase = softstate->asoft_info.agpki_aperbase; 1726 info->agpi_apersize = softstate->asoft_info.agpki_apersize; 1727 info->agpi_pgtotal = softstate->asoft_pgtotal; 1728 info->agpi_pgsystem = info->agpi_pgtotal; 1729 
info->agpi_pgused = softstate->asoft_pgused; 1730 1731 return (0); 1732 } 1733 1734 static uint32_t 1735 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode) 1736 { 1737 uint32_t cmd; 1738 int rq, sba, over4g, fw, rate; 1739 1740 /* 1741 * tstatus: target device status 1742 * mstatus: master device status 1743 * mode: the agp mode to be sent 1744 */ 1745 1746 /* 1747 * RQ - Request Queue size 1748 * set RQ to the min of mode and tstatus 1749 * if mode set a RQ larger than hardware can support, 1750 * use the max RQ which hardware can support. 1751 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support 1752 * Corelogic will enqueue agp transaction 1753 */ 1754 rq = mode & AGPSTAT_RQ_MASK; 1755 if ((tstatus & AGPSTAT_RQ_MASK) < rq) 1756 rq = tstatus & AGPSTAT_RQ_MASK; 1757 1758 /* 1759 * SBA - Sideband Addressing 1760 * 1761 * Sideband Addressing provides an additional bus to pass requests 1762 * (address and command) to the target from the master. 1763 * 1764 * set SBA if all three support it 1765 */ 1766 sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA) 1767 & (mode & AGPSTAT_SBA); 1768 1769 /* set OVER4G if all three support it */ 1770 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G) 1771 & (mode & AGPSTAT_OVER4G); 1772 1773 /* 1774 * FW - fast write 1775 * 1776 * acceleration of memory write transactions from the corelogic to the 1777 * A.G.P. master device acting like a PCI target. 
1778 * 1779 * set FW if all three support it 1780 */ 1781 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW) 1782 & (mode & AGPSTAT_FW); 1783 1784 /* 1785 * figure out the max rate 1786 * AGP v2 support: 4X, 2X, 1X speed 1787 * status bit meaning 1788 * --------------------------------------------- 1789 * 7:3 others 1790 * 3 0 stand for V2 support 1791 * 0:2 001:1X, 010:2X, 100:4X 1792 * ---------------------------------------------- 1793 */ 1794 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK) 1795 & (mode & AGPSTAT_RATE_MASK); 1796 if (rate & AGP2_RATE_4X) 1797 rate = AGP2_RATE_4X; 1798 else if (rate & AGP2_RATE_2X) 1799 rate = AGP2_RATE_2X; 1800 else 1801 rate = AGP2_RATE_1X; 1802 1803 cmd = rq | sba | over4g | fw | rate; 1804 /* enable agp mode */ 1805 cmd |= AGPCMD_AGPEN; 1806 1807 return (cmd); 1808 } 1809 1810 static uint32_t 1811 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode) 1812 { 1813 uint32_t cmd = 0; 1814 uint32_t rq, arqsz, cal, sba, over4g, fw, rate; 1815 1816 /* 1817 * tstatus: target device status 1818 * mstatus: master device status 1819 * mode: the agp mode to be set 1820 */ 1821 1822 /* 1823 * RQ - Request Queue size 1824 * Set RQ to the min of mode and tstatus 1825 * If mode set a RQ larger than hardware can support, 1826 * use the max RQ which hardware can support. 1827 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support 1828 * Corelogic will enqueue agp transaction; 1829 */ 1830 rq = mode & AGPSTAT_RQ_MASK; 1831 if ((tstatus & AGPSTAT_RQ_MASK) < rq) 1832 rq = tstatus & AGPSTAT_RQ_MASK; 1833 1834 /* 1835 * ARQSZ - Asynchronous Request Queue size 1836 * Set the value equal to tstatus. 
1837 * Don't allow the mode register to override values 1838 */ 1839 arqsz = tstatus & AGPSTAT_ARQSZ_MASK; 1840 1841 /* 1842 * CAL - Calibration cycle 1843 * Set to the min of tstatus and mstatus 1844 * Don't allow override by mode register 1845 */ 1846 cal = tstatus & AGPSTAT_CAL_MASK; 1847 if ((mstatus & AGPSTAT_CAL_MASK) < cal) 1848 cal = mstatus & AGPSTAT_CAL_MASK; 1849 1850 /* 1851 * SBA - Sideband Addressing 1852 * 1853 * Sideband Addressing provides an additional bus to pass requests 1854 * (address and command) to the target from the master. 1855 * 1856 * SBA in agp v3.0 must be set 1857 */ 1858 sba = AGPCMD_SBAEN; 1859 1860 /* GART64B is not set since no hardware supports it now */ 1861 1862 /* Set OVER4G if all three support it */ 1863 over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G) 1864 & (mode & AGPSTAT_OVER4G); 1865 1866 /* 1867 * FW - fast write 1868 * 1869 * Acceleration of memory write transactions from the corelogic to the 1870 * A.G.P. master device acting like a PCI target. 
1871 * 1872 * Always set FW in AGP 3.0 1873 */ 1874 fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW) 1875 & (mode & AGPSTAT_FW); 1876 1877 /* 1878 * Figure out the max rate 1879 * 1880 * AGP v3 support: 8X, 4X speed 1881 * 1882 * status bit meaning 1883 * --------------------------------------------- 1884 * 7:3 others 1885 * 3 1 stand for V3 support 1886 * 0:2 001:4X, 010:8X, 011:4X,8X 1887 * ---------------------------------------------- 1888 */ 1889 rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK) 1890 & (mode & AGPSTAT_RATE_MASK); 1891 if (rate & AGP3_RATE_8X) 1892 rate = AGP3_RATE_8X; 1893 else 1894 rate = AGP3_RATE_4X; 1895 1896 cmd = rq | arqsz | cal | sba | over4g | fw | rate; 1897 /* Enable AGP mode */ 1898 cmd |= AGPCMD_AGPEN; 1899 1900 return (cmd); 1901 } 1902 1903 static int 1904 agp_setup(agpgart_softstate_t *softstate, uint32_t mode) 1905 { 1906 uint32_t tstatus, mstatus; 1907 uint32_t agp_mode; 1908 1909 tstatus = softstate->asoft_info.agpki_tstatus; 1910 mstatus = softstate->asoft_info.agpki_mstatus; 1911 1912 /* 1913 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0 1914 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we 1915 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in 1916 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register, 1917 * we can get which mode it is working at. The working mode of 1918 * AGP master and AGP target must be consistent. That is, both 1919 * of them must work on AGP 3.0 mode or AGP 2.0 mode. 
1920 */ 1921 if ((softstate->asoft_info.agpki_tver.agpv_major == 3) && 1922 (tstatus & AGPSTAT_MODE3)) { 1923 /* Master device should be 3.0 mode, too */ 1924 if ((softstate->asoft_info.agpki_mver.agpv_major != 3) || 1925 ((mstatus & AGPSTAT_MODE3) == 0)) 1926 return (EIO); 1927 1928 agp_mode = agp_v3_setup(tstatus, mstatus, mode); 1929 /* Write to the AGPCMD register of target and master devices */ 1930 if (lyr_set_agp_cmd(agp_mode, 1931 &softstate->asoft_devreg)) 1932 return (EIO); 1933 1934 softstate->asoft_mode = agp_mode; 1935 1936 return (0); 1937 } 1938 1939 /* 1940 * If agp taget device doesn't work in AGP 3.0 mode, 1941 * it must work in AGP 2.0 mode. And make sure 1942 * master device work in AGP 2.0 mode too 1943 */ 1944 if ((softstate->asoft_info.agpki_mver.agpv_major == 3) && 1945 (mstatus & AGPSTAT_MODE3)) 1946 return (EIO); 1947 1948 agp_mode = agp_v2_setup(tstatus, mstatus, mode); 1949 if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg)) 1950 return (EIO); 1951 softstate->asoft_mode = agp_mode; 1952 1953 return (0); 1954 } 1955 1956 /* 1957 * agp_alloc_kmem() 1958 * 1959 * Description: 1960 * This function allocates physical memory for userland applications 1961 * by ddi interfaces. This function can also be called to allocate 1962 * small phsyical contiguous pages, usually tens of kilobytes. 
1963 * 1964 * Arguments: 1965 * softsate driver soft state pointer 1966 * length memory size 1967 * 1968 * Returns: 1969 * entryp new keytable entity pointer 1970 * NULL no keytable slot available or no physical 1971 * memory available 1972 */ 1973 static keytable_ent_t * 1974 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length, int type) 1975 { 1976 keytable_ent_t keyentry; 1977 keytable_ent_t *entryp; 1978 int ret; 1979 1980 ASSERT(AGP_ALIGNED(length)); 1981 1982 bzero(&keyentry, sizeof (keytable_ent_t)); 1983 1984 keyentry.kte_pages = AGP_BYTES2PAGES(length); 1985 keyentry.kte_type = type; 1986 1987 /* 1988 * Set dma_attr_sgllen to assure contiguous physical pages 1989 */ 1990 if (type == AGP_PHYSICAL) 1991 agpgart_dma_attr.dma_attr_sgllen = 1; 1992 else 1993 agpgart_dma_attr.dma_attr_sgllen = (int)keyentry.kte_pages; 1994 1995 /* 4k size pages */ 1996 keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP); 1997 1998 if (ddi_dma_alloc_handle(softstate->asoft_dip, 1999 &agpgart_dma_attr, 2000 DDI_DMA_SLEEP, NULL, 2001 &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) { 2002 AGPDB_PRINT2((CE_WARN, 2003 "agp_alloc_kmem: ddi_dma_allco_hanlde error")); 2004 goto err4; 2005 } 2006 2007 if ((ret = ddi_dma_mem_alloc( 2008 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2009 length, 2010 &gart_dev_acc_attr, 2011 DDI_DMA_CONSISTENT, 2012 DDI_DMA_SLEEP, NULL, 2013 &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr, 2014 &KMEMP(keyentry.kte_memhdl)->kmem_reallen, 2015 &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) { 2016 AGPDB_PRINT2((CE_WARN, 2017 "agp_alloc_kmem: ddi_dma_mem_alloc error")); 2018 2019 goto err3; 2020 } 2021 2022 ret = ddi_dma_addr_bind_handle( 2023 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2024 NULL, 2025 KMEMP(keyentry.kte_memhdl)->kmem_kvaddr, 2026 length, 2027 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2028 DDI_DMA_SLEEP, 2029 NULL, 2030 &KMEMP(keyentry.kte_memhdl)->kmem_dcookie, 2031 &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num); 2032 2033 /* 2034 * Even 
dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more 2035 * than one cookie, we check this in the if statement. 2036 */ 2037 2038 if ((ret != DDI_DMA_MAPPED) || 2039 ((agpgart_dma_attr.dma_attr_sgllen == 1) && 2040 (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1))) { 2041 AGPDB_PRINT2((CE_WARN, 2042 "agp_alloc_kmem: can not alloc physical memory properly")); 2043 goto err2; 2044 } 2045 2046 keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) * 2047 keyentry.kte_pages, KM_SLEEP); 2048 2049 if (kmem_getpfns( 2050 KMEMP(keyentry.kte_memhdl)->kmem_handle, 2051 &KMEMP(keyentry.kte_memhdl)->kmem_dcookie, 2052 KMEMP(keyentry.kte_memhdl)->kmem_cookies_num, 2053 keyentry.kte_pfnarray)) { 2054 AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error")); 2055 goto err1; 2056 } 2057 2058 ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype, 2059 keyentry.kte_pfnarray, keyentry.kte_pages)); 2060 if (agp_check_pfns(softstate->asoft_devreg.agprd_arctype, 2061 keyentry.kte_pfnarray, keyentry.kte_pages)) 2062 goto err1; 2063 entryp = agp_fill_empty_keyent(softstate, &keyentry); 2064 if (!entryp) { 2065 AGPDB_PRINT2((CE_WARN, 2066 "agp_alloc_kmem: agp_fill_empty_keyent error")); 2067 2068 goto err1; 2069 } 2070 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 2071 2072 return (entryp); 2073 2074 err1: 2075 kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages); 2076 keyentry.kte_pfnarray = NULL; 2077 (void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle); 2078 KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0; 2079 err2: 2080 ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl); 2081 KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL; 2082 KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0; 2083 KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL; 2084 err3: 2085 ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle)); 2086 KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL; 2087 err4: 2088 
kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t)); 2089 keyentry.kte_memhdl = NULL; 2090 return (NULL); 2091 2092 } 2093 2094 /* 2095 * agp_alloc_mem() 2096 * 2097 * Description: 2098 * This function allocate physical memory for userland applications, 2099 * in order to save kernel virtual space, we use the direct mapping 2100 * memory interface if it is available. 2101 * 2102 * Arguments: 2103 * st driver soft state pointer 2104 * length memory size 2105 * type AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical 2106 * memory type for intel i810 IGD 2107 * 2108 * Returns: 2109 * NULL Invalid memory type or can not allocate memory 2110 * Keytable entry pointer returned by agp_alloc_kmem 2111 */ 2112 static keytable_ent_t * 2113 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type) 2114 { 2115 2116 /* 2117 * AGP_PHYSICAL type require contiguous physical pages exported 2118 * to X drivers, like i810 HW cursor, ARGB cursor. the number of 2119 * pages needed is usuallysmall and contiguous, 4K, 16K. So we 2120 * use DDI interface to allocated such memory. And X use xsvc 2121 * drivers to map this memory into its own address space. 2122 */ 2123 ASSERT(st); 2124 2125 switch (type) { 2126 case AGP_NORMAL: 2127 case AGP_PHYSICAL: 2128 return (agp_alloc_kmem(st, length, type)); 2129 default: 2130 return (NULL); 2131 } 2132 } 2133 2134 /* 2135 * free_gart_table() 2136 * 2137 * Description: 2138 * This function frees the gart table memory allocated by driver. 2139 * Must disable gart table before calling this function. 
2140 * 2141 * Arguments: 2142 * softstate driver soft state pointer 2143 * 2144 */ 2145 static void 2146 free_gart_table(agpgart_softstate_t *st) 2147 { 2148 2149 if (st->gart_dma_handle == NULL) 2150 return; 2151 2152 (void) ddi_dma_unbind_handle(st->gart_dma_handle); 2153 ddi_dma_mem_free(&st->gart_dma_acc_handle); 2154 st->gart_dma_acc_handle = NULL; 2155 ddi_dma_free_handle(&st->gart_dma_handle); 2156 st->gart_dma_handle = NULL; 2157 st->gart_vbase = 0; 2158 st->gart_size = 0; 2159 } 2160 2161 /* 2162 * alloc_gart_table() 2163 * 2164 * Description: 2165 * This function allocates one physical continuous gart table. 2166 * INTEL integrated video device except i810 have their special 2167 * video bios; No need to allocate gart table for them. 2168 * 2169 * Arguments: 2170 * st driver soft state pointer 2171 * 2172 * Returns: 2173 * 0 success 2174 * -1 can not allocate gart tabl 2175 */ 2176 static int 2177 alloc_gart_table(agpgart_softstate_t *st) 2178 { 2179 int num_pages; 2180 size_t table_size; 2181 int ret = DDI_SUCCESS; 2182 ddi_dma_cookie_t cookie; 2183 uint32_t num_cookies; 2184 2185 num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize); 2186 2187 /* 2188 * Only 40-bit maximum physical memory is supported by today's 2189 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses). 2190 * No one supports 64-bit gart entries now, so the size of gart 2191 * entries defaults to 32-bit though AGP3.0 specifies the possibility 2192 * of 64-bit gart entries. 
2193 */ 2194 2195 table_size = num_pages * (sizeof (uint32_t)); 2196 2197 /* 2198 * Only AMD64 can put gart table above 4G, 40 bits at maximum 2199 */ 2200 if (st->asoft_devreg.agprd_arctype == ARC_AMD64AGP) 2201 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL; 2202 else 2203 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU; 2204 /* Allocate physical continuous page frame for gart table */ 2205 if (ret = ddi_dma_alloc_handle(st->asoft_dip, 2206 &garttable_dma_attr, 2207 DDI_DMA_SLEEP, 2208 NULL, &st->gart_dma_handle)) { 2209 AGPDB_PRINT2((CE_WARN, 2210 "alloc_gart_table: ddi_dma_alloc_handle failed")); 2211 goto err3; 2212 } 2213 2214 if (ret = ddi_dma_mem_alloc(st->gart_dma_handle, 2215 table_size, 2216 &gart_dev_acc_attr, 2217 DDI_DMA_CONSISTENT, 2218 DDI_DMA_SLEEP, NULL, 2219 &st->gart_vbase, 2220 &st->gart_size, 2221 &st->gart_dma_acc_handle)) { 2222 AGPDB_PRINT2((CE_WARN, 2223 "alloc_gart_table: ddi_dma_mem_alloc failed")); 2224 goto err2; 2225 2226 } 2227 2228 ret = ddi_dma_addr_bind_handle(st->gart_dma_handle, 2229 NULL, st->gart_vbase, 2230 table_size, 2231 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2232 DDI_DMA_SLEEP, NULL, 2233 &cookie, &num_cookies); 2234 2235 st->gart_pbase = cookie.dmac_address; 2236 2237 if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) { 2238 if (num_cookies > 1) 2239 (void) ddi_dma_unbind_handle(st->gart_dma_handle); 2240 AGPDB_PRINT2((CE_WARN, 2241 "alloc_gart_table: alloc contiguous phys memory failed")); 2242 goto err1; 2243 } 2244 2245 return (0); 2246 err1: 2247 ddi_dma_mem_free(&st->gart_dma_acc_handle); 2248 st->gart_dma_acc_handle = NULL; 2249 err2: 2250 ddi_dma_free_handle(&st->gart_dma_handle); 2251 st->gart_dma_handle = NULL; 2252 err3: 2253 st->gart_pbase = 0; 2254 st->gart_size = 0; 2255 st->gart_vbase = 0; 2256 2257 return (-1); 2258 } 2259 2260 /* 2261 * agp_add_to_gart() 2262 * 2263 * Description: 2264 * This function fills the gart table entries by a given page frame number 2265 * array and set up the agp aperture 
page to physical memory page
 * translation.
 * Arguments:
 * 	type		system arc type, ARC_INTELAGP or ARC_AMD64AGP
 * 			(only these encode entries via pfn2gartentry)
 * 	pfnarray	allocated physical page frame number array
 * 	pg_offset	agp aperture start page to be bound
 * 	entries		the number of pages to be bound
 * 	dma_hdl		gart table dma memory handle
 * 	tablep		gart table kernel virtual address
 * Returns:
 * 	-1	failed
 * 	0	success
 */
static int
agp_add_to_gart(
    agp_arc_type_t type,
    pfn_t *pfnarray,
    uint32_t pg_offset,
    uint32_t entries,
    ddi_dma_handle_t dma_hdl,
    uint32_t *tablep)
{
	uint32_t items;	/* unsigned, to match the "entries" comparison */
	uint32_t *entryp;
	uint32_t itemv;

	entryp = tablep + pg_offset;
	for (items = 0; items < entries; items++) {
		/* Refuse the whole bind if any pfn can not be encoded */
		if (pfn2gartentry(type, pfnarray[items], &itemv))
			return (-1);
		entryp[items] = itemv;
	}

	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);

	return (0);
}

/*
 * agp_bind_key()
 *
 * Description:
 * 	This function will call low level gart table access functions to
 * 	set up gart table translation. Also it will do some sanity
 * 	checking on key table entry.
2315 * 2316 * Arguments: 2317 * softstate driver soft state pointer 2318 * keyent key table entity pointer to be bound 2319 * pg_offset aperture start page to be bound 2320 * Returns: 2321 * EINVAL not a valid operation 2322 */ 2323 static int 2324 agp_bind_key(agpgart_softstate_t *softstate, 2325 keytable_ent_t *keyent, uint32_t pg_offset) 2326 { 2327 uint64_t pg_end; 2328 int ret = 0; 2329 2330 ASSERT(keyent); 2331 ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS)); 2332 ASSERT(mutex_owned(&softstate->asoft_instmutex)); 2333 2334 pg_end = pg_offset + keyent->kte_pages; 2335 2336 if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) { 2337 AGPDB_PRINT2((CE_WARN, 2338 "agp_bind_key: key=0x%x,exceed aper range", 2339 keyent->kte_key)); 2340 2341 return (EINVAL); 2342 } 2343 2344 if (agp_check_off(softstate->asoft_table, 2345 pg_offset, keyent->kte_pages)) { 2346 AGPDB_PRINT2((CE_WARN, 2347 "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped", 2348 pg_offset, keyent->kte_pages)); 2349 return (EINVAL); 2350 } 2351 2352 ASSERT(keyent->kte_pfnarray != NULL); 2353 2354 switch (softstate->asoft_devreg.agprd_arctype) { 2355 case ARC_IGD810: 2356 case ARC_IGD830: 2357 ret = lyr_i8xx_add_to_gtt(pg_offset, keyent, 2358 &softstate->asoft_devreg); 2359 if (ret) 2360 return (EIO); 2361 break; 2362 case ARC_INTELAGP: 2363 case ARC_AMD64AGP: 2364 ret = agp_add_to_gart( 2365 softstate->asoft_devreg.agprd_arctype, 2366 keyent->kte_pfnarray, 2367 pg_offset, 2368 keyent->kte_pages, 2369 softstate->gart_dma_handle, 2370 (uint32_t *)softstate->gart_vbase); 2371 if (ret) 2372 return (EINVAL); 2373 /* Flush GTLB table */ 2374 lyr_flush_gart_cache(&softstate->asoft_devreg); 2375 break; 2376 default: 2377 AGPDB_PRINT2((CE_WARN, 2378 "agp_bind_key: arc type = 0x%x unsupported", 2379 softstate->asoft_devreg.agprd_arctype)); 2380 return (EINVAL); 2381 } 2382 return (0); 2383 } 2384 2385 static int 2386 agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2387 { 2388 
int instance; 2389 agpgart_softstate_t *softstate; 2390 2391 if (cmd != DDI_ATTACH) { 2392 AGPDB_PRINT2((CE_WARN, 2393 "agpgart_attach: only attach op supported")); 2394 return (DDI_FAILURE); 2395 } 2396 instance = ddi_get_instance(dip); 2397 2398 if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance) 2399 != DDI_SUCCESS) { 2400 AGPDB_PRINT2((CE_WARN, 2401 "agpgart_attach: soft state zalloc failed")); 2402 goto err1; 2403 2404 } 2405 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 2406 mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL); 2407 softstate->asoft_dip = dip; 2408 /* 2409 * Allocate LDI identifier for agpgart driver 2410 * Agpgart driver is the kernel consumer 2411 */ 2412 if (ldi_ident_from_dip(dip, &softstate->asoft_li)) { 2413 AGPDB_PRINT2((CE_WARN, 2414 "agpgart_attach: LDI indentifier allcation failed")); 2415 goto err2; 2416 } 2417 2418 softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN; 2419 /* Install agp kstat */ 2420 if (agp_init_kstats(softstate)) { 2421 AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error")); 2422 goto err3; 2423 } 2424 /* 2425 * devfs will create /dev/agpgart 2426 * and /devices/agpgart:agpgart 2427 */ 2428 2429 if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR, 2430 AGP_INST2MINOR(instance), 2431 DDI_NT_AGP_PSEUDO, 0)) { 2432 AGPDB_PRINT2((CE_WARN, 2433 "agpgart_attach: Can not create minor node")); 2434 goto err4; 2435 } 2436 2437 softstate->asoft_table = kmem_zalloc( 2438 AGP_MAXKEYS * (sizeof (keytable_ent_t)), 2439 KM_SLEEP); 2440 2441 return (DDI_SUCCESS); 2442 err4: 2443 agp_fini_kstats(softstate); 2444 err3: 2445 ldi_ident_release(softstate->asoft_li); 2446 err2: 2447 ddi_soft_state_free(agpgart_glob_soft_handle, instance); 2448 err1: 2449 return (DDI_FAILURE); 2450 } 2451 2452 static int 2453 agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2454 { 2455 int instance; 2456 agpgart_softstate_t *st; 2457 2458 instance = ddi_get_instance(dip); 2459 2460 st = 
ddi_get_soft_state(agpgart_glob_soft_handle, instance); 2461 2462 if (cmd != DDI_DETACH) 2463 return (DDI_FAILURE); 2464 2465 /* 2466 * Caller should free all the memory allocated explicitly. 2467 * We release the memory allocated by caller which is not 2468 * properly freed. mutex_enter here make sure assertion on 2469 * softstate mutex success in agp_dealloc_mem. 2470 */ 2471 mutex_enter(&st->asoft_instmutex); 2472 if (agp_del_allkeys(st)) { 2473 AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err")); 2474 AGPDB_PRINT2((CE_WARN, 2475 "you might free agp memory exported to your applications")); 2476 2477 mutex_exit(&st->asoft_instmutex); 2478 return (DDI_FAILURE); 2479 } 2480 mutex_exit(&st->asoft_instmutex); 2481 if (st->asoft_table) { 2482 kmem_free(st->asoft_table, 2483 AGP_MAXKEYS * (sizeof (keytable_ent_t))); 2484 st->asoft_table = 0; 2485 } 2486 2487 ddi_remove_minor_node(dip, AGPGART_DEVNODE); 2488 agp_fini_kstats(st); 2489 ldi_ident_release(st->asoft_li); 2490 mutex_destroy(&st->asoft_instmutex); 2491 ddi_soft_state_free(agpgart_glob_soft_handle, instance); 2492 2493 return (DDI_SUCCESS); 2494 } 2495 2496 /*ARGSUSED*/ 2497 static int 2498 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 2499 void **resultp) 2500 { 2501 agpgart_softstate_t *st; 2502 int instance, rval = DDI_FAILURE; 2503 dev_t dev; 2504 2505 switch (cmd) { 2506 case DDI_INFO_DEVT2DEVINFO: 2507 dev = (dev_t)arg; 2508 instance = AGP_DEV2INST(dev); 2509 st = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 2510 if (st != NULL) { 2511 mutex_enter(&st->asoft_instmutex); 2512 *resultp = st->asoft_dip; 2513 mutex_exit(&st->asoft_instmutex); 2514 rval = DDI_SUCCESS; 2515 } else 2516 *resultp = NULL; 2517 2518 break; 2519 case DDI_INFO_DEVT2INSTANCE: 2520 dev = (dev_t)arg; 2521 instance = AGP_DEV2INST(dev); 2522 *resultp = (void *)(uintptr_t)instance; 2523 rval = DDI_SUCCESS; 2524 2525 break; 2526 default: 2527 break; 2528 } 2529 2530 return (rval); 2531 } 2532 2533 /* 
2534 * agpgart_open() 2535 * 2536 * Description: 2537 * This function is the driver open entry point. If it is the 2538 * first time the agpgart driver is opened, the driver will 2539 * open other agp related layered drivers and set up the agpgart 2540 * table properly. 2541 * 2542 * Arguments: 2543 * dev device number pointer 2544 * openflags open flags 2545 * otyp OTYP_BLK, OTYP_CHR 2546 * credp user's credential's struct pointer 2547 * 2548 * Returns: 2549 * ENXIO operation error 2550 * EAGAIN resoure temporarily unvailable 2551 * 0 success 2552 */ 2553 /*ARGSUSED*/ 2554 static int 2555 agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp) 2556 { 2557 int instance = AGP_DEV2INST(*dev); 2558 agpgart_softstate_t *softstate; 2559 int rc = 0; 2560 2561 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 2562 if (softstate == NULL) { 2563 AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err")); 2564 return (ENXIO); 2565 } 2566 mutex_enter(&softstate->asoft_instmutex); 2567 2568 if (softstate->asoft_opened) { 2569 softstate->asoft_opened++; 2570 mutex_exit(&softstate->asoft_instmutex); 2571 return (0); 2572 } 2573 2574 /* 2575 * The driver is opened first time, so we initialize layered 2576 * driver interface and softstate member here. 
2577 */ 2578 softstate->asoft_pgused = 0; 2579 if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) { 2580 AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed")); 2581 mutex_exit(&softstate->asoft_instmutex); 2582 return (EAGAIN); 2583 } 2584 2585 /* Call into layered driver */ 2586 if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) { 2587 AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error")); 2588 lyr_end(&softstate->asoft_devreg); 2589 mutex_exit(&softstate->asoft_instmutex); 2590 return (EIO); 2591 } 2592 2593 /* 2594 * BIOS already set up gtt table for ARC_IGD830 2595 */ 2596 if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) { 2597 softstate->asoft_opened++; 2598 2599 softstate->asoft_pgtotal = 2600 get_max_pages(softstate->asoft_info.agpki_apersize); 2601 2602 if (lyr_config_devices(&softstate->asoft_devreg)) { 2603 AGPDB_PRINT2((CE_WARN, 2604 "agpgart_open: lyr_config_devices error")); 2605 lyr_end(&softstate->asoft_devreg); 2606 mutex_exit(&softstate->asoft_instmutex); 2607 2608 return (EIO); 2609 } 2610 mutex_exit(&softstate->asoft_instmutex); 2611 return (0); 2612 } 2613 2614 rc = alloc_gart_table(softstate); 2615 2616 /* 2617 * Allocate physically contiguous pages for AGP arc or 2618 * i810 arc. If failed, divide aper_size by 2 to 2619 * reduce gart table size until 4 megabytes. This 2620 * is just a workaround for systems with very few 2621 * physically contiguous memory. 
2622 */ 2623 if (rc) { 2624 while ((softstate->asoft_info.agpki_apersize >= 4) && 2625 (alloc_gart_table(softstate))) { 2626 softstate->asoft_info.agpki_apersize >>= 1; 2627 } 2628 if (softstate->asoft_info.agpki_apersize >= 4) 2629 rc = 0; 2630 } 2631 2632 if (rc != 0) { 2633 AGPDB_PRINT2((CE_WARN, 2634 "agpgart_open: alloc gart table failed")); 2635 lyr_end(&softstate->asoft_devreg); 2636 mutex_exit(&softstate->asoft_instmutex); 2637 return (EAGAIN); 2638 } 2639 2640 softstate->asoft_pgtotal = 2641 get_max_pages(softstate->asoft_info.agpki_apersize); 2642 /* 2643 * BIOS doesn't initialize GTT for i810, 2644 * So i810 GTT must be created by driver. 2645 * 2646 * Set up gart table and enable it. 2647 */ 2648 if (lyr_set_gart_addr(softstate->gart_pbase, 2649 &softstate->asoft_devreg)) { 2650 AGPDB_PRINT2((CE_WARN, 2651 "agpgart_open: set gart table addr failed")); 2652 free_gart_table(softstate); 2653 lyr_end(&softstate->asoft_devreg); 2654 mutex_exit(&softstate->asoft_instmutex); 2655 return (EIO); 2656 } 2657 if (lyr_config_devices(&softstate->asoft_devreg)) { 2658 AGPDB_PRINT2((CE_WARN, 2659 "agpgart_open: lyr_config_devices failed")); 2660 free_gart_table(softstate); 2661 lyr_end(&softstate->asoft_devreg); 2662 mutex_exit(&softstate->asoft_instmutex); 2663 return (EIO); 2664 } 2665 2666 softstate->asoft_opened++; 2667 mutex_exit(&softstate->asoft_instmutex); 2668 2669 return (0); 2670 } 2671 2672 /* 2673 * agpgart_close() 2674 * 2675 * Description: 2676 * agpgart_close will release resources allocated in the first open 2677 * and close other open layered drivers. Also it frees the memory 2678 * allocated by ioctls. 
2679 * 2680 * Arguments: 2681 * dev device number 2682 * flag file status flag 2683 * otyp OTYP_BLK, OTYP_CHR 2684 * credp user's credential's struct pointer 2685 * 2686 * Returns: 2687 * ENXIO not an error, to support "deferred attach" 2688 * 0 success 2689 */ 2690 /*ARGSUSED*/ 2691 static int 2692 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp) 2693 { 2694 int instance = AGP_DEV2INST(dev); 2695 agpgart_softstate_t *softstate; 2696 2697 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 2698 if (softstate == NULL) { 2699 AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err")); 2700 return (ENXIO); 2701 } 2702 2703 mutex_enter(&softstate->asoft_instmutex); 2704 ASSERT(softstate->asoft_opened); 2705 2706 2707 /* 2708 * If the last process close this device is not the controlling 2709 * process, also release the control over agpgart driver here if the 2710 * the controlling process fails to release the control before it 2711 * close the driver. 2712 */ 2713 if (softstate->asoft_acquired == 1) { 2714 AGPDB_PRINT2((CE_WARN, 2715 "agpgart_close: auto release control over driver")); 2716 release_control(softstate); 2717 } 2718 2719 if (lyr_unconfig_devices(&softstate->asoft_devreg)) { 2720 AGPDB_PRINT2((CE_WARN, 2721 "agpgart_close: lyr_unconfig_device error")); 2722 mutex_exit(&softstate->asoft_instmutex); 2723 return (EIO); 2724 } 2725 softstate->asoft_agpen = 0; 2726 2727 if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) { 2728 free_gart_table(softstate); 2729 } 2730 2731 lyr_end(&softstate->asoft_devreg); 2732 2733 /* 2734 * This statement must be positioned before agp_del_allkeys 2735 * agp_dealloc_mem indirectly called by agp_del_allkeys 2736 * will test this variable. 2737 */ 2738 softstate->asoft_opened = 0; 2739 2740 /* 2741 * Free the memory allocated by user applications which 2742 * was never deallocated. 
2743 */ 2744 (void) agp_del_allkeys(softstate); 2745 2746 mutex_exit(&softstate->asoft_instmutex); 2747 2748 return (0); 2749 } 2750 2751 static int 2752 ioctl_agpgart_info(agpgart_softstate_t *softstate, void *arg, int flags) 2753 { 2754 agp_info_t infostruct; 2755 #ifdef _MULTI_DATAMODEL 2756 agp_info32_t infostruct32; 2757 #endif 2758 2759 bzero(&infostruct, sizeof (agp_info_t)); 2760 2761 #ifdef _MULTI_DATAMODEL 2762 bzero(&infostruct32, sizeof (agp_info32_t)); 2763 if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) { 2764 if (copyinfo(softstate, &infostruct)) 2765 return (EINVAL); 2766 2767 agpinfo_default_to_32(infostruct, infostruct32); 2768 if (ddi_copyout(&infostruct32, arg, 2769 sizeof (agp_info32_t), flags) != 0) 2770 return (EFAULT); 2771 2772 return (0); 2773 } 2774 #endif /* _MULTI_DATAMODEL */ 2775 if (copyinfo(softstate, &infostruct)) 2776 return (EINVAL); 2777 2778 if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) { 2779 return (EFAULT); 2780 } 2781 2782 return (0); 2783 } 2784 2785 static int 2786 ioctl_agpgart_acquire(agpgart_softstate_t *st) 2787 { 2788 if (st->asoft_acquired) { 2789 AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired")); 2790 return (EBUSY); 2791 } 2792 acquire_control(st); 2793 return (0); 2794 } 2795 2796 static int 2797 ioctl_agpgart_release(agpgart_softstate_t *st) 2798 { 2799 if (is_controlling_proc(st) < 0) { 2800 AGPDB_PRINT2((CE_WARN, 2801 "ioctl_agpgart_release: not a controlling process")); 2802 return (EPERM); 2803 } 2804 release_control(st); 2805 return (0); 2806 } 2807 2808 static int 2809 ioctl_agpgart_setup(agpgart_softstate_t *st, void *arg, int flags) 2810 { 2811 agp_setup_t data; 2812 int rc = 0; 2813 2814 if (is_controlling_proc(st) < 0) { 2815 AGPDB_PRINT2((CE_WARN, 2816 "ioctl_agpgart_setup: not a controlling process")); 2817 return (EPERM); 2818 } 2819 2820 if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) { 2821 AGPDB_PRINT2((CE_WARN, 2822 "ioctl_agpgart_setup: no true 
agp bridge")); 2823 return (EINVAL); 2824 } 2825 2826 if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0) 2827 return (EFAULT); 2828 2829 if (rc = agp_setup(st, data.agps_mode)) 2830 return (rc); 2831 /* Store agp mode status for kstat */ 2832 st->asoft_agpen = 1; 2833 return (0); 2834 } 2835 2836 static int 2837 ioctl_agpgart_alloc(agpgart_softstate_t *st, void *arg, int flags) 2838 { 2839 agp_allocate_t alloc_info; 2840 keytable_ent_t *entryp; 2841 size_t length; 2842 uint64_t pg_num; 2843 2844 if (is_controlling_proc(st) < 0) { 2845 AGPDB_PRINT2((CE_WARN, 2846 "ioctl_agpgart_alloc: not a controlling process")); 2847 return (EPERM); 2848 } 2849 2850 if (ddi_copyin(arg, &alloc_info, 2851 sizeof (agp_allocate_t), flags) != 0) { 2852 return (EFAULT); 2853 } 2854 pg_num = st->asoft_pgused + alloc_info.agpa_pgcount; 2855 if (pg_num > st->asoft_pgtotal) { 2856 AGPDB_PRINT2((CE_WARN, 2857 "ioctl_agpgart_alloc: exceeding the memory pages limit")); 2858 AGPDB_PRINT2((CE_WARN, 2859 "ioctl_agpgart_alloc: request %x pages failed", 2860 alloc_info.agpa_pgcount)); 2861 AGPDB_PRINT2((CE_WARN, 2862 "ioctl_agpgart_alloc: pages used %x total is %x", 2863 st->asoft_pgused, st->asoft_pgtotal)); 2864 2865 return (EINVAL); 2866 } 2867 2868 length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount); 2869 entryp = agp_alloc_mem(st, length, alloc_info.agpa_type); 2870 if (!entryp) { 2871 AGPDB_PRINT2((CE_WARN, 2872 "ioctl_agpgart_alloc: allocate 0x%lx bytes failed", 2873 length)); 2874 return (ENOMEM); 2875 } 2876 ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS)); 2877 alloc_info.agpa_key = entryp->kte_key; 2878 if (alloc_info.agpa_type == AGP_PHYSICAL) { 2879 alloc_info.agpa_physical = 2880 (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT); 2881 } 2882 /* Update the memory pagse used */ 2883 st->asoft_pgused += alloc_info.agpa_pgcount; 2884 2885 if (ddi_copyout(&alloc_info, arg, 2886 sizeof (agp_allocate_t), flags) != 0) { 2887 2888 return (EFAULT); 2889 } 2890 2891 
return (0); 2892 } 2893 2894 static int 2895 ioctl_agpgart_dealloc(agpgart_softstate_t *st, intptr_t arg) 2896 { 2897 int key; 2898 keytable_ent_t *keyent; 2899 2900 if (is_controlling_proc(st) < 0) { 2901 AGPDB_PRINT2((CE_WARN, 2902 "ioctl_agpgart_dealloc: not a controlling process")); 2903 return (EPERM); 2904 } 2905 key = (int)arg; 2906 if ((key >= AGP_MAXKEYS) || key < 0) { 2907 return (EINVAL); 2908 } 2909 keyent = &st->asoft_table[key]; 2910 if (!keyent->kte_memhdl) { 2911 return (EINVAL); 2912 } 2913 2914 if (agp_dealloc_mem(st, keyent)) 2915 return (EINVAL); 2916 2917 /* Update the memory pages used */ 2918 st->asoft_pgused -= keyent->kte_pages; 2919 bzero(keyent, sizeof (keytable_ent_t)); 2920 2921 return (0); 2922 } 2923 2924 static int 2925 ioctl_agpgart_bind(agpgart_softstate_t *st, void *arg, int flags) 2926 { 2927 agp_bind_t bind_info; 2928 keytable_ent_t *keyent; 2929 int key; 2930 uint32_t pg_offset; 2931 int retval = 0; 2932 2933 if (is_controlling_proc(st) < 0) { 2934 AGPDB_PRINT2((CE_WARN, 2935 "ioctl_agpgart_bind: not a controlling process")); 2936 return (EPERM); 2937 } 2938 2939 if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) { 2940 return (EFAULT); 2941 } 2942 2943 key = bind_info.agpb_key; 2944 if ((key >= AGP_MAXKEYS) || key < 0) { 2945 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key")); 2946 return (EINVAL); 2947 } 2948 2949 if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) { 2950 if (AGP_PAGES2KB(bind_info.agpb_pgstart) < 2951 st->asoft_info.agpki_presize) { 2952 AGPDB_PRINT2((CE_WARN, 2953 "ioctl_agpgart_bind: bind to prealloc area " 2954 "pgstart = %dKB < presize = %ldKB", 2955 AGP_PAGES2KB(bind_info.agpb_pgstart), 2956 st->asoft_info.agpki_presize)); 2957 return (EINVAL); 2958 } 2959 } 2960 2961 pg_offset = bind_info.agpb_pgstart; 2962 keyent = &st->asoft_table[key]; 2963 if (!keyent->kte_memhdl) { 2964 AGPDB_PRINT2((CE_WARN, 2965 "ioctl_agpgart_bind: Key = 0x%x can't get keyenty", 2966 key)); 2967 return 
(EINVAL); 2968 } 2969 2970 if (keyent->kte_bound != 0) { 2971 AGPDB_PRINT2((CE_WARN, 2972 "ioctl_agpgart_bind: Key = 0x%x already bound", 2973 key)); 2974 return (EINVAL); 2975 } 2976 retval = agp_bind_key(st, keyent, pg_offset); 2977 2978 if (retval == 0) { 2979 keyent->kte_pgoff = pg_offset; 2980 keyent->kte_bound = 1; 2981 } 2982 2983 return (retval); 2984 } 2985 2986 static int 2987 ioctl_agpgart_unbind(agpgart_softstate_t *st, void *arg, int flags) 2988 { 2989 int key, retval = 0; 2990 agp_unbind_t unbindinfo; 2991 keytable_ent_t *keyent; 2992 2993 if (is_controlling_proc(st) < 0) { 2994 AGPDB_PRINT2((CE_WARN, 2995 "ioctl_agpgart_bind: not a controlling process")); 2996 return (EPERM); 2997 } 2998 2999 if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) { 3000 return (EFAULT); 3001 } 3002 key = unbindinfo.agpu_key; 3003 if ((key >= AGP_MAXKEYS) || key < 0) { 3004 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key")); 3005 return (EINVAL); 3006 } 3007 keyent = &st->asoft_table[key]; 3008 if (!keyent->kte_bound) { 3009 return (EINVAL); 3010 } 3011 3012 if ((retval = agp_unbind_key(st, keyent)) != 0) 3013 return (retval); 3014 3015 return (0); 3016 } 3017 3018 /*ARGSUSED*/ 3019 static int 3020 agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags, 3021 cred_t *credp, int *rvalp) 3022 { 3023 int instance; 3024 int retval = 0; 3025 void *arg = (void*)intarg; 3026 3027 agpgart_softstate_t *softstate; 3028 3029 instance = AGP_DEV2INST(dev); 3030 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 3031 if (softstate == NULL) { 3032 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err")); 3033 return (ENXIO); 3034 } 3035 3036 if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) { 3037 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied")); 3038 return (EPERM); 3039 } 3040 3041 mutex_enter(&softstate->asoft_instmutex); 3042 3043 switch (cmd) { 3044 case AGPIOC_INFO: 3045 retval = ioctl_agpgart_info(softstate, 
arg, flags); 3046 break; 3047 case AGPIOC_ACQUIRE: 3048 retval = ioctl_agpgart_acquire(softstate); 3049 break; 3050 case AGPIOC_RELEASE: 3051 retval = ioctl_agpgart_release(softstate); 3052 break; 3053 case AGPIOC_SETUP: 3054 retval = ioctl_agpgart_setup(softstate, arg, flags); 3055 break; 3056 case AGPIOC_ALLOCATE: 3057 retval = ioctl_agpgart_alloc(softstate, arg, flags); 3058 break; 3059 case AGPIOC_DEALLOCATE: 3060 retval = ioctl_agpgart_dealloc(softstate, intarg); 3061 break; 3062 case AGPIOC_BIND: 3063 retval = ioctl_agpgart_bind(softstate, arg, flags); 3064 break; 3065 case AGPIOC_UNBIND: 3066 retval = ioctl_agpgart_unbind(softstate, arg, flags); 3067 break; 3068 default: 3069 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument")); 3070 retval = ENXIO; 3071 break; 3072 } 3073 3074 mutex_exit(&softstate->asoft_instmutex); 3075 return (retval); 3076 } 3077 3078 static int 3079 agpgart_segmap(dev_t dev, off_t off, struct as *asp, 3080 caddr_t *addrp, off_t len, unsigned int prot, 3081 unsigned int maxprot, unsigned int flags, cred_t *credp) 3082 { 3083 3084 struct agpgart_softstate *softstate; 3085 int instance; 3086 int rc = 0; 3087 3088 instance = AGP_DEV2INST(dev); 3089 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 3090 if (softstate == NULL) { 3091 AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err")); 3092 return (ENXIO); 3093 } 3094 if (!AGP_ALIGNED(len)) 3095 return (EINVAL); 3096 3097 mutex_enter(&softstate->asoft_instmutex); 3098 3099 /* 3100 * Process must have gart map privilege or gart access privilege 3101 * to map agp memory. 
3102 */ 3103 if (secpolicy_gart_map(credp)) { 3104 mutex_exit(&softstate->asoft_instmutex); 3105 AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied")); 3106 return (EPERM); 3107 } 3108 3109 rc = devmap_setup(dev, (offset_t)off, asp, addrp, 3110 (size_t)len, prot, maxprot, flags, credp); 3111 3112 mutex_exit(&softstate->asoft_instmutex); 3113 return (rc); 3114 } 3115 3116 /*ARGSUSED*/ 3117 static int 3118 agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len, 3119 size_t *mappedlen, uint_t model) 3120 { 3121 struct agpgart_softstate *softstate; 3122 int instance, status; 3123 struct keytable_ent *mementry; 3124 offset_t local_offset; 3125 3126 instance = AGP_DEV2INST(dev); 3127 softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance); 3128 if (softstate == NULL) { 3129 AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err")); 3130 return (ENXIO); 3131 } 3132 3133 3134 if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) { 3135 AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large")); 3136 return (EINVAL); 3137 } 3138 3139 /* 3140 * Can not find any memory now, so fail. 
3141 */ 3142 3143 mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset)); 3144 3145 if (mementry == NULL) { 3146 AGPDB_PRINT2((CE_WARN, 3147 "agpgart_devmap: can not find the proper keyent")); 3148 return (EINVAL); 3149 } 3150 3151 local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff); 3152 3153 if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) { 3154 len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset; 3155 } 3156 3157 switch (mementry->kte_type) { 3158 case AGP_NORMAL: 3159 if (PMEMP(mementry->kte_memhdl)->pmem_cookie) { 3160 status = devmap_pmem_setup(cookie, 3161 softstate->asoft_dip, 3162 &agp_devmap_cb, 3163 PMEMP(mementry->kte_memhdl)->pmem_cookie, 3164 local_offset, 3165 len, PROT_ALL, 3166 (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE), 3167 &mem_dev_acc_attr); 3168 } else { 3169 AGPDB_PRINT2((CE_WARN, 3170 "agpgart_devmap: not a valid memory type")); 3171 return (EINVAL); 3172 3173 } 3174 3175 break; 3176 default: 3177 AGPDB_PRINT2((CE_WARN, 3178 "agpgart_devmap: not a valid memory type")); 3179 return (EINVAL); 3180 } 3181 3182 3183 if (status == 0) { 3184 *mappedlen = len; 3185 } else { 3186 *mappedlen = 0; 3187 AGPDB_PRINT2((CE_WARN, 3188 "agpgart_devmap: devmap interface failed")); 3189 return (EINVAL); 3190 } 3191 3192 return (0); 3193 } 3194 3195 static struct cb_ops agpgart_cb_ops = { 3196 agpgart_open, /* open() */ 3197 agpgart_close, /* close() */ 3198 nodev, /* strategy() */ 3199 nodev, /* print routine */ 3200 nodev, /* no dump routine */ 3201 nodev, /* read() */ 3202 nodev, /* write() */ 3203 agpgart_ioctl, /* agpgart_ioctl */ 3204 agpgart_devmap, /* devmap routine */ 3205 nodev, /* no longer use mmap routine */ 3206 agpgart_segmap, /* system segmap routine */ 3207 nochpoll, /* no chpoll routine */ 3208 ddi_prop_op, /* system prop operations */ 3209 0, /* not a STREAMS driver */ 3210 D_DEVMAP | D_MP, /* safe for multi-thread/multi-processor */ 3211 CB_REV, /* cb_ops version? 
*/ 3212 nodev, /* cb_aread() */ 3213 nodev, /* cb_awrite() */ 3214 }; 3215 3216 static struct dev_ops agpgart_ops = { 3217 DEVO_REV, /* devo_rev */ 3218 0, /* devo_refcnt */ 3219 agpgart_getinfo, /* devo_getinfo */ 3220 nulldev, /* devo_identify */ 3221 nulldev, /* devo_probe */ 3222 agpgart_attach, /* devo_attach */ 3223 agpgart_detach, /* devo_detach */ 3224 nodev, /* devo_reset */ 3225 &agpgart_cb_ops, /* devo_cb_ops */ 3226 (struct bus_ops *)0, /* devo_bus_ops */ 3227 NULL, /* devo_power */ 3228 ddi_quiesce_not_needed, /* devo_quiesce */ 3229 }; 3230 3231 static struct modldrv modldrv = { 3232 &mod_driverops, 3233 "AGP driver", 3234 &agpgart_ops, 3235 }; 3236 3237 static struct modlinkage modlinkage = { 3238 MODREV_1, /* MODREV_1 is indicated by manual */ 3239 {&modldrv, NULL, NULL, NULL} 3240 }; 3241 3242 static void *agpgart_glob_soft_handle; 3243 3244 int 3245 _init(void) 3246 { 3247 int ret = DDI_SUCCESS; 3248 3249 ret = ddi_soft_state_init(&agpgart_glob_soft_handle, 3250 sizeof (agpgart_softstate_t), 3251 AGPGART_MAX_INSTANCES); 3252 3253 if (ret != 0) { 3254 AGPDB_PRINT2((CE_WARN, 3255 "_init: soft state init error code=0x%x", ret)); 3256 return (ret); 3257 } 3258 3259 if ((ret = mod_install(&modlinkage)) != 0) { 3260 AGPDB_PRINT2((CE_WARN, 3261 "_init: mod install error code=0x%x", ret)); 3262 ddi_soft_state_fini(&agpgart_glob_soft_handle); 3263 return (ret); 3264 } 3265 3266 return (DDI_SUCCESS); 3267 } 3268 3269 int 3270 _info(struct modinfo *modinfop) 3271 { 3272 return (mod_info(&modlinkage, modinfop)); 3273 } 3274 3275 int 3276 _fini(void) 3277 { 3278 int ret; 3279 3280 if ((ret = mod_remove(&modlinkage)) == 0) { 3281 ddi_soft_state_fini(&agpgart_glob_soft_handle); 3282 } 3283 3284 return (ret); 3285 } 3286