/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	agp_open,
	.d_close =	agp_close,
	.d_ioctl =	agp_ioctl,
	.d_mmap =	agp_mmap,
	.d_name =	"agp",
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache(void)
{
#ifdef __i386__
	wbinvd();
#endif
#ifdef __alpha__
	/*
	 * FIXME: This is most likely not correct as it doesn't flush CPU
	 * write caches, but we don't have a facility to do that and
	 * this is all Linux does, too.
	 */
	alpha_mb();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
	u_int32_t status;
	u_int8_t ptr, next;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & 0x10))
		return 0;

	/*
	 * Traverse the capabilities list.
	 */
	for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
	     ptr != 0;
	     ptr = next) {
		u_int32_t capid = pci_read_config(dev, ptr, 4);
		next = AGP_CAPID_GET_NEXT_PTR(capid);

		/*
		 * If this capability entry ID is 2, then we are done.
		 */
		if (AGP_CAPID_GET_CAP_ID(capid) == 2)
			return ptr;
	}

	return 0;
}
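
/*
 * Example (illustrative sketch, not part of the driver): the offset
 * returned by agp_find_caps() is the base of the AGP capability block
 * in PCI config space, so the AGP registers are reached by adding the
 * AGP_STATUS and AGP_COMMAND offsets to it, as the enable routines
 * below do.
 *
 *	u_int8_t caps = agp_find_caps(dev);
 *	if (caps != 0) {
 *		u_int32_t status;
 *
 *		status = pci_read_config(dev, caps + AGP_STATUS, 4);
 *		printf("AGP status register: 0x%08x\n", status);
 *	}
 */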
/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		device_get_children(bus, &kids, &numkids);
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
			      "allocating GATT for aperture of size %dM\n",
			      apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return NULL;

	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
					0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return NULL;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}

static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						 RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
				  device_get_unit(dev),
				  UID_ROOT,
				  GID_WHEEL,
				  0600,
				  "agpgart");

	return 0;
}
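
/*
 * Worked example of the heuristic above (illustrative only): on a
 * machine with roughly 256MB of RAM, memsize is 256, the table search
 * stops at the {256, 204} row, and as_maxmem becomes 204 << 20, i.e.
 * at most 204MB of system memory may be allocated as AGP memory.  A
 * size that falls between rows uses the next larger row, and anything
 * above 4096MB is clamped to the last row (3932MB).
 */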
int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	mtx_destroy(&sc->as_lock);
	destroy_dev(sc->as_devnode);
	agp_flush_cache();
	return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	      & AGP_MODE_GET_FW(mstatus)
	      & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}
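
/*
 * Negotiation example (illustrative, with made-up register values):
 * if the target reports RQ of 32 and the master RQ of 80 while the
 * caller's mode word asks for 48, the minimum (32) wins.  CAL is the
 * minimum of the two status registers and ignores the mode word
 * entirely, while ARQSZ starts from the mode word but is raised to
 * the largest value either status register reports.  The same
 * min/AND pattern appears in agp_v2_enable() below.
 */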
static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	       & AGP_MODE_GET_SBA(mstatus)
	       & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	      & AGP_MODE_GET_FW(mstatus)
	      & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}
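
/*
 * Sketch of how a chipset mini driver would typically hook the
 * generic routines up (the "foo" name is hypothetical, modelled on
 * the existing agp_* mini drivers): the generic functions are simply
 * installed in the device method table for the agp_if interface.
 *
 *	static device_method_t foo_methods[] = {
 *		...
 *		DEVMETHOD(agp_enable,		agp_generic_enable),
 *		DEVMETHOD(agp_alloc_memory,	agp_generic_alloc_memory),
 *		DEVMETHOD(agp_free_memory,	agp_generic_free_memory),
 *		DEVMETHOD(agp_bind_memory,	agp_generic_bind_memory),
 *		DEVMETHOD(agp_unbind_memory,	agp_generic_unbind_memory),
 *		...
 *	};
 */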
struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		       type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
			      (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() used with VM_ALLOC_RETRY may
	 * block and we can't hold a mutex while blocking.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		VM_OBJECT_LOCK(mem->am_obj);
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(mem->am_obj);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
	}

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		VM_OBJECT_LOCK(mem->am_obj);
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
		VM_OBJECT_UNLOCK(mem->am_obj);

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
				offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	return error;
}
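
/*
 * Page-size arithmetic example (illustrative): with a PAGE_SIZE of
 * 8192 (e.g. alpha) and an AGP_PAGE_SIZE of 4096, each wired VM page
 * covers two GATT entries, so binding a 16KB block at aperture offset
 * 0 issues AGP_BIND_PAGE calls for offsets 0x0000, 0x1000, 0x2000 and
 * 0x3000, drawing physical addresses from just two vm_page_t's.
 */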
int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}
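
/*
 * The acquire/release helpers above implement a simple ownership
 * protocol: the AGP hardware is either free, owned by userland (via
 * AGPIOC_ACQUIRE) or owned by the kernel (via agp_acquire()).  A
 * hedged sketch of a kernel client respecting the protocol, assuming
 * a device handle already in hand:
 *
 *	if (agp_acquire(agpdev) == 0) {
 *		...program the aperture...
 *		agp_release(agpdev);
 *	}
 *
 * An EBUSY return means another owner, e.g. the X server, currently
 * holds the hardware.
 */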
static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
			       alloc->type,
			       alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *)data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *)data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);
	}

	return EINVAL;
}

static int
agp_mmap(dev_t kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;

	return 0;
}
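
/*
 * Userland usage sketch (illustrative; error handling omitted): after
 * including <sys/agpio.h>, a program drives the ioctl interface above
 * through /dev/agpgart roughly as follows.
 *
 *	int fd = open("/dev/agpgart", O_RDWR);
 *	agp_info info;
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	agp_setup setup = { .agp_mode = info.agp_mode };
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	agp_allocate alloc = { .type = 0, .pg_count = 16 };
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	agp_bind bind = { .key = alloc.key, .pg_start = 0 };
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */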
/* Implementation of the kernel api */

device_t
agp_find_device(void)
{
	if (!agp_devclass)
		return 0;
	return devclass_get_device(agp_devclass, 0);
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = rman_get_size(sc->as_aperture);
	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
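
/*
 * Kernel consumer sketch (illustrative; a DRM-style driver would look
 * roughly like this, with error handling and the 64KB size chosen
 * arbitrarily):
 *
 *	device_t agpdev = agp_find_device();
 *	if (agpdev != NULL && agp_state(agpdev) == AGP_ACQUIRE_FREE) {
 *		struct agp_info ai;
 *		void *handle;
 *
 *		agp_acquire(agpdev);
 *		agp_get_info(agpdev, &ai);
 *		agp_enable(agpdev, ai.ai_mode);
 *		handle = agp_alloc_memory(agpdev, 0, 64 * 1024);
 *		agp_bind_memory(agpdev, handle, 0);
 *		...
 *		agp_unbind_memory(agpdev, handle);
 *		agp_free_memory(agpdev, handle);
 *		agp_release(agpdev);
 *	}
 */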