/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_agp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_NEEDGIANT,
	.d_open = agp_open,
	.d_close = agp_close,
	.d_ioctl = agp_ioctl,
	.d_mmap = agp_mmap,
	.d_name = "agp",
};

static devclass_t agp_devclass;

/* Helper functions for implementing chipset mini drivers. */

u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_cap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}
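
/*
 * The value returned by agp_find_caps() is the offset of the AGP
 * capability block in PCI config space, or 0 if the device has no AGP
 * capability.  Callers in this file use it as the base for the AGP
 * registers, for example:
 *
 *	pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
 */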

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
	    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
	free(gatt, M_AGP);
}

static u_int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define AGP_MAX_SIZE	nitems(agp_max)
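
/*
 * Each row of agp_max above pairs an amount of installed system
 * memory (in MB, first column) with an upper bound on AGP memory
 * allocation (in MB, second column).  agp_generic_attach() below
 * selects the first row whose first column covers the machine's real
 * memory.
 */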

/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used.  Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
	struct agp_softc *sc = device_get_softc(dev);

	sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
	struct make_dev_args mdargs;
	struct agp_softc *sc = device_get_softc(dev);
	int error, i, unit;
	u_int memsize;

	/*
	 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
	 * because the kernel doesn't need to map it.
	 */

	if (sc->as_aperture_rid != -1) {
		if (sc->as_aperture_rid == 0)
			sc->as_aperture_rid = AGP_APBASE;

		sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->as_aperture_rid, RF_SHAREABLE);
		if (!sc->as_aperture)
			return ENOMEM;
	}

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(realmem) >> 20;
	for (i = 0; i < AGP_MAX_SIZE; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == AGP_MAX_SIZE)
		i = AGP_MAX_SIZE - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devalias = NULL;

	make_dev_args_init(&mdargs);
	mdargs.mda_devsw = &agp_cdevsw;
	mdargs.mda_uid = UID_ROOT;
	mdargs.mda_gid = GID_WHEEL;
	mdargs.mda_mode = 0600;
	mdargs.mda_si_drv1 = sc;
	mdargs.mda_si_drv2 = NULL;

	unit = device_get_unit(dev);
	error = make_dev_s(&mdargs, &sc->as_devnode, "agpgart%d", unit);
	if (error == 0) {
		/*
		 * Create an alias for the first device that shows up.
		 */
		if (unit == 0) {
			(void)make_dev_alias_p(MAKEDEV_CHECKNAME,
			    &sc->as_devalias, sc->as_devnode, "agpgart");
		}
	} else {
		agp_free_res(dev);
	}

	return error;
}
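
/*
 * A chipset mini driver's attach method would typically override the
 * aperture rid before calling the generic attach code.  A minimal
 * sketch (AGP_MYCHIP_APBASE is a hypothetical chipset-specific BAR
 * rid):
 *
 *	static int
 *	agp_mychip_attach(device_t dev)
 *	{
 *		agp_set_aperture_resource(dev, AGP_MYCHIP_APBASE);
 *		return (agp_generic_attach(dev));
 *	}
 */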

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
	if (sc->as_devalias != NULL)
		destroy_dev(sc->as_devalias);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_aperture != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
		    sc->as_aperture);
	mtx_destroy(&sc->as_lock);
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
u_int32_t
agp_generic_get_aperture(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
	u_int32_t current_aperture;

	current_aperture = AGP_GET_APERTURE(dev);
	if (current_aperture != aperture)
		return EINVAL;
	else
		return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_MODE_3(command, 1);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}
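
/*
 * Both enable paths above follow the same negotiation pattern: take
 * the minimum request queue depth (RQ) of the mode argument and the
 * two status registers, AND together the per-bit features (SBA, FW)
 * and the rate masks, select the fastest common rate, and finally
 * write the identical command word to both the bridge and the
 * display device.
 */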

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support. AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system. This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(mode) &&
	    AGP_MODE_GET_MODE_3(tstatus) &&
	    AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (size > sc->as_maxmem - sc->as_allocated)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_SWAP, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}
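
/*
 * Note for the bind/unbind code below: the GATT is indexed in
 * AGP_PAGE_SIZE units, which may be smaller than the CPU's
 * PAGE_SIZE.  Each wired VM page therefore occupies PAGE_SIZE /
 * AGP_PAGE_SIZE consecutive GATT entries.
 */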

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
    vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() may sleep and we can't hold a mutex
	 * while sleeping.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_WLOCK(mem->am_obj);
		i = 0;
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
		if (k >= i)
			vm_page_xunbusy(m);
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);

	AGP_FLUSH_TLB(dev);

	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_unwire(m, PQ_INACTIVE);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}
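
/*
 * The two helpers above implement a simple ownership protocol: the
 * AGP chipset can be acquired by at most one owner (the kernel or a
 * userland client) at a time, and only that owner may release it.
 */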

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture)
		info->aper_base = rman_get_start(sc->as_aperture);
	else
		info->aper_base = 0;
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_chipset_flush(device_t dev)
{

	return (AGP_CHIPSET_FLUSH(dev));
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != NULL) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = kdev->si_drv1;

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	case AGPIOC_CHIPSET_FLUSH:
		return agp_chipset_flush(dev);
	}

	return EINVAL;
}
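
/*
 * A userland client of /dev/agpgart would typically drive the ioctls
 * above in this order (a sketch only, with error handling omitted):
 *
 *	fd = open("/dev/agpgart", O_RDWR);
 *	ioctl(fd, AGPIOC_ACQUIRE);
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE);
 */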

static int
agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	if (sc->as_aperture == NULL)
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

/* Implementation of the kernel api */

device_t
agp_find_device(void)
{
	device_t *children, child;
	int i, count;

	if (!agp_devclass)
		return NULL;
	if (devclass_get_devices(agp_devclass, &children, &count) != 0)
		return NULL;
	child = NULL;
	for (i = 0; i < count; i++) {
		if (device_is_attached(children[i])) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);
	return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture != NULL)
		info->ai_aperture_base = rman_get_start(sc->as_aperture);
	else
		info->ai_aperture_base = 0;
	info->ai_aperture_size = AGP_GET_APERTURE(dev);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
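
/*
 * Unlike agp_generic_bind_memory(), agp_bind_pages() below does not
 * allocate or wire anything itself: the caller must supply pages that
 * are already wired (enforced by the KASSERT), and both size and
 * offset must be AGP page aligned.
 */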

int
agp_bind_pages(device_t dev, vm_page_t *pages, vm_size_t size,
    vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i, j, k, pa;
	vm_page_t m;
	int error;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages[OFF_TO_IDX(i)];
		KASSERT(vm_page_wired(m),
		    ("agp_bind_pages: page %p hasn't been wired", m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < size; j += AGP_PAGE_SIZE) {
			pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
			    (uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);

				mtx_unlock(&sc->as_lock);
				return (error);
			}
		}
	}

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}

int
agp_unbind_pages(device_t dev, vm_size_t size, vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, offset + i);

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}