/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

void agp_flush_chipset(struct agp_bridge_data *bridge)
{
	if (bridge->driver->chipset_flush)
		bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;
	mem->vmalloc_flag = false;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
		mem->vmalloc_flag = true;
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
	if (mem->vmalloc_flag) {
		vfree(mem->pages);
	} else {
		kfree(mem->pages);
	}
}
EXPORT_SYMBOL(agp_free_page_array);


static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
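
/*
 * Worked example (illustrative sketch, not part of the driver): assuming
 * 4 KiB pages and 64-bit pointers, ENTRIES_PER_PAGE is 4096 / 8 = 512, so
 * agp_allocate_memory() below rounds a request up to whole page-array pages.
 * The helper name is ours, purely for illustration.
 */
#if 0
static size_t example_scratch_pages(size_t page_count)
{
	/* e.g. 1000 requested pages -> (1000 + 511) / 512 = 2 scratch pages */
	return (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
}
#endif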

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);

/* End - Routine to copy over information structure */
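
/*
 * Illustrative sketch (not compiled): a consumer such as a DRM driver would
 * typically query the backend like this before mapping the aperture. The
 * function and variable names are ours, purely for illustration.
 */
#if 0
static void example_query_backend(struct agp_bridge_data *bridge)
{
	struct agp_kern_info info;

	if (agp_copy_info(bridge, &info) == 0 && info.chipset == SUPPORTED) {
		/* info.aper_base / info.aper_size now describe the GART
		 * aperture; aper_size is in megabytes (see agp_return_size()).
		 */
	}
}
#endif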

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	if (curr->bridge->driver->agp_map_memory) {
		ret_val = curr->bridge->driver->agp_map_memory(curr);
		if (ret_val)
			return ret_val;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL.
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	if (curr->bridge->driver->agp_unmap_memory)
		curr->bridge->driver->agp_unmap_memory(curr);

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);

/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
	struct agp_memory *curr;
	int ret_val = 0;

	spin_lock(&agp_bridge->mapped_lock);
	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
		ret_val = curr->bridge->driver->insert_memory(curr,
							      curr->pg_start,
							      curr->type);
		if (ret_val != 0)
			break;
	}
	spin_unlock(&agp_bridge->mapped_lock);
	return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);

/* End - Routines for handling swapping of agp_memory into the GATT */
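
/*
 * Illustrative sketch of the intended calling sequence (not compiled): a
 * client allocates memory, binds it at an aperture page offset, and later
 * tears it down in reverse order. Apart from the exported agp_* calls, the
 * names below are ours, purely for illustration.
 */
#if 0
static int example_agp_lifecycle(struct agp_bridge_data *bridge)
{
	struct agp_memory *mem;

	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
	if (mem == NULL)
		return -ENOMEM;

	/* Bind 16 pages starting at GATT entry 0. */
	if (agp_bind_memory(mem, 0) != 0) {
		agp_free_memory(mem);
		return -EBUSY;
	}

	/* ... the pages are now visible through the aperture ... */

	agp_unbind_memory(mem);
	agp_free_memory(mem);
	return 0;
}
#endif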

/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 * The RATE field indicates the data transfer rates supported by this
	 * device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphics card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds.
	 * This can happen when applications written before the AGP 3.0
	 * standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller doesn't know what it is doing. The bridge is in
		 * 3.0 mode and we have been passed a 3.0 mode, but with 2.x
		 * speed bits set. AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
						current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what it is doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
					current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, see if both the graphics card
		 * and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
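
/*
 * Illustrative sketch (not compiled): the rate handling above treats the low
 * three status/command bits as a rate field. In AGP 2.x each bit is a rate
 * (x1, x2, x4); in AGP 3.0 the same field encodes x4 and x8, which is why
 * agp_device_command() below multiplies the field by four when reporting the
 * mode of a v3 device. The helper name is ours, purely for illustration.
 */
#if 0
static int example_decode_rate(u32 agpstat, bool agp_v3)
{
	int rate = agpstat & 7;			/* low three bits carry the rate */

	return agp_v3 ? rate * 4 : rate;	/* e.g. v3 field 2 -> x8 */
}
#endif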

/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);
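
/*
 * Worked example (illustrative, not compiled): the first dword of the AGP
 * capability packs the revision into bits 23:20 (major) and 19:16 (minor),
 * which is what the shifts above extract. Assuming those shift values, a
 * hypothetical readback of 0x00300002 (capability ID 0x02) decodes as AGP 3.0.
 */
#if 0
static void example_decode_ncapid(void)
{
	u32 ncapid = 0x00300002;	/* hypothetical readback */
	u8 major = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;	/* -> 3 */
	u8 minor = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;	/* -> 0 */
}
#endif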

void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle two-level GATTs */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order = A_SIZE_8(temp)->page_order;
				num_entries = A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)table, 1 << page_order);
	bridge->gatt_table = (void *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					     (PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
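
/*
 * Worked example (illustrative, not compiled): each GATT entry is a 32-bit
 * word covering one aperture page. With 4 KiB pages, a 256 MB aperture needs
 * 256 MB / 4 KiB = 65536 entries, i.e. 65536 * 4 bytes = 256 KiB of table,
 * which is 64 pages or page_order 6 -- matching the 256 MB row of the
 * agp3_generic_sizes[] table at the bottom of this file.
 */
#if 0
static void example_gatt_sizing(void)
{
	/* 256 MB aperture, 4 KiB pages */
	unsigned long entries = (256UL << 20) >> PAGE_SHIFT;	/* 65536   */
	unsigned long bytes   = entries * sizeof(u32);		/* 256 KiB */
	int order             = get_order(bytes);		/* 6       */
}
#endif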

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with two-level GATTs */
		return -EINVAL;
		break;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table.
	 */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with two-level GATTs */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);
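
/*
 * Illustrative sketch (not compiled): once insert_memory() has written the
 * GATT entries, aperture offsets translate page by page, so page i of a
 * region bound at pg_start appears on the bus at
 * aper_base + (pg_start + i) * PAGE_SIZE. The helper name is ours, purely
 * for illustration.
 */
#if 0
static unsigned long example_bound_page_bus_addr(struct agp_bridge_data *bridge,
						 off_t pg_start, size_t i)
{
	/* page i of a region bound at pg_start appears here on the bus */
	return bridge->gart_bus_addr + ((pg_start + i) << PAGE_SHIFT);
}
#endif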

int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the
 * allocated memory. They also handle incrementing the current_memory_agp
 * value, which is checked against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */
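
/*
 * Illustrative sketch (not compiled): chipset drivers that need no special
 * page handling typically point their agp_bridge_driver hooks at the generic
 * routines above. The struct shown here is hypothetical and heavily
 * abbreviated; real drivers fill in many more fields.
 */
#if 0
static const struct agp_bridge_driver example_driver = {
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
};
#endif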

/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
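
/*
 * Worked example (illustrative, not compiled): agp3_generic_fetch_size()
 * above matches the APSIZE readback against the driver's aperture table.
 * The readback value below is hypothetical.
 */
#if 0
static void example_apsize_lookup(void)
{
	u16 apsize = 0x0f00;	/* hypothetical APSIZE readback */
	/* matches the {256, 65536, 6, 0xf00} row of agp3_generic_sizes[]
	 * below, so agp3_generic_fetch_size() would return 256 (MB).
	 */
}
#endif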

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);