/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

void agp_flush_chipset(struct agp_bridge_data *bridge)
{
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (is_vmalloc_addr(mem->pages)) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

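/*
 * ENTRIES_PER_PAGE is the number of unsigned-long-sized entries that fit in
 * one page: with 4 KiB pages and 8-byte longs that is 4096 / 8 = 512, so a
 * request for, say, 1000 pages in agp_allocate_memory() below is rounded up
 * to two scratch pages.
 */
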
/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	if (curr->bridge->driver->agp_map_memory) {
		ret_val = curr->bridge->driver->agp_map_memory(curr);
		if (ret_val)
			return ret_val;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);

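/*
 * Typical use of the allocation and binding API above by an in-kernel
 * client, shown here only as a sketch (error handling trimmed; the caller
 * is assumed to have obtained @bridge via agp_backend_acquire() and to
 * release it again afterwards):
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, num_pages, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	if (agp_bind_memory(mem, pg_start) != 0) {
 *		agp_free_memory(mem);
 *		return -EINVAL;
 *	}
 *	...use the aperture...
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */
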
/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	if (curr->bridge->driver->agp_unmap_memory)
		curr->bridge->driver->agp_unmap_memory(curr);

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);

/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);

/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n",
			current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
		 * has been passed a 3.0 mode, but with 2.x speed bits set.
		 * AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card, and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}

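/*
 * Net effect of the two parse routines above: for v2, SBA and the rate bits
 * survive in *bridge_agpstat only if the bridge, the VGA device and the
 * requested mode all advertise them, and the highest surviving rate wins.
 * For v3, x8 is chosen only when both bridge and card are x8 capable and
 * the request does not limit us to x4; otherwise x4 is used, sideband
 * addressing is always enabled, and ARQSZ/calibration-cycle come from the
 * hardware rather than the mode word.
 */
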
/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					     (PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
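
/*
 * Note on the table set up above: the GATT pages are marked reserved and
 * mapped uncached (set_memory_uc() on x86, ioremap_nocache() elsewhere)
 * because the chipset reads the table directly over the bus, and every
 * entry is initialised to the bridge's scratch page so that stray GART
 * accesses hit a harmless dummy page instead of random memory.
 */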

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory. They also handle incrementing the current_memory_agp value, which is
 * checked against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */

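/*
 * The routines above are only defaults: a chipset driver can point the
 * agp_alloc_page/agp_alloc_pages and agp_destroy_page/agp_destroy_pages
 * hooks in its struct agp_bridge_driver at its own implementations, and
 * agp_allocate_memory()/agp_free_memory() will dispatch to those instead.
 */
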

/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);