/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

#if defined(CONFIG_X86)
int map_page_into_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
EXPORT_SYMBOL_GPL(map_page_into_agp);

int unmap_page_from_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
EXPORT_SYMBOL_GPL(unmap_page_from_agp);
#endif

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
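 *
 * The usual lifecycle (a rough sketch, not a strict contract):
 * agp_allocate_memory() builds an agp_memory handle plus its backing pages,
 * agp_bind_memory() maps it into the GATT at a page offset, and
 * agp_unbind_memory()/agp_free_memory() undo those steps in reverse.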
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}


struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}
	new->memory = vmalloc(PAGE_SIZE * scratch_pages);

	if (new->memory == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound == TRUE)
		agp_unbind_memory(curr);

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		for (i = 0; i < curr->page_count; i++) {
			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
		}
		flush_agp_mappings();
	}
	agp_free_key(curr->key);
	vfree(curr->memory);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages.
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical RAM. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
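 *
 * A minimal usage sketch (illustrative only; "bridge" and "pg_start" are
 * assumed to come from the caller, e.g. after acquiring the backend):
 *
 *	int ret;
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	ret = agp_bind_memory(mem, pg_start);
 *	if (ret != 0) {
 *		agp_free_memory(mem);
 *		return ret;
 *	}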
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++) {
		void *addr = bridge->driver->agp_alloc_page(bridge);

		if (addr == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->memory[i] = virt_to_gart(addr);
		new->page_count++;
	}
	new->bridge = bridge;

	flush_agp_mappings();

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
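 *
 * A short sketch of the intended call pattern (illustrative only):
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		printk(KERN_DEBUG "aperture at 0x%lx, %zu MB\n",
 *		       info.aper_base, info.aper_size);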
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound == TRUE) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (curr->is_flushed == FALSE) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = TRUE;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = TRUE;
	curr->pg_start = pg_start;
	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound != TRUE) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = FALSE;
	curr->pg_start = 0;
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);

/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/* Check the speed bits make sense. Only one should be set.
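	 * AGPSTAT2_1X, AGPSTAT2_2X and AGPSTAT2_4X live in bits 0-2 of the
	 * mode word, so a request such as 0x5 (1x|4x) is narrowed to a
	 * single rate (4x) by the switch below.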
	 */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphics card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values.
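	 * (Smaller encoded values appear to mean a shorter calibration
	 * interval, so taking the minimum honours whichever component needs
	 * calibration more often.)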
	 */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller doesn't know what it is doing. The bridge is in
		 * 3.0 mode and we have been passed a 3.0 mode, but with 2.x
		 * speed bits set. AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk("bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk("graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata.
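	 * (The AGP_ERRATA_* flags are expected to have been set by the
	 * chipset-specific driver before this point.)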
	 */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X).
 * @bridge_agpstat: current agp_stat from the AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * OK, here we have an AGP device. Disable impossible
	 * settings, and adjust the read queue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, int agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
			agp_v3 ? 3 : 2, pci_name(device), mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds.
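	 * The AGP revision is carried in the first dword of the AGP
	 * capability block: major in bits 23:20, minor in bits 19:16,
	 * which is what the shifts below extract.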
	 */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
		agp_bridge->major_version,
		agp_bridge->minor_version,
		pci_name(agp_bridge->dev));

	pci_read_config_dword(agp_bridge->dev,
		agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, TRUE);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
				bridge->capndx+AGPCTRL, temp);

			printk(KERN_INFO PFX "Device is in legacy mode,"
				" falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, FALSE);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen.
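				 * (When alloc_gatt_pages() fails we bump the
				 * size index and the enclosing loop retries
				 * with the next smaller aperture size.)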
				 */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table.
	 */

	iounmap(bridge->gatt_table);
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != 0 || mem->type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		bridge->driver->cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
		readl(bridge->gatt_table+j);	/* PCI Posting. */
	}

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (type != 0 || mem->type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	global_cache_flush();
	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);


struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);


void agp_generic_free_by_type(struct agp_memory *curr)
{
	vfree(curr->memory);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);


/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	SetPageLocked(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page_address(page);
}
EXPORT_SYMBOL(agp_generic_alloc_page);


void agp_generic_destroy_page(void *addr)
{
	struct page *page;

	if (addr == NULL)
		return;

	page = virt_to_page(addr);
	unmap_page_from_agp(page);
	put_page(page);
	unlock_page(page);
	free_page((unsigned long)addr);
	atomic_dec(&agp_bridge->current_memory_agp);
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      unsigned long addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
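 *
 * They program the AGP 3.0 registers (APSIZE, GARTLO, CTRL) relative to the
 * bridge's AGP capability pointer, so chipset drivers whose hardware follows
 * the spec layout can plug them straight into their driver hooks.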
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);