xref: /linux/drivers/char/agp/generic.c (revision 023ff3eee6255390384e050d9daab1490c88edf8)
1 /*
2  * AGPGART driver.
3  * Copyright (C) 2004 Silicon Graphics, Inc.
4  * Copyright (C) 2002-2005 Dave Jones.
5  * Copyright (C) 1999 Jeff Hartmann.
6  * Copyright (C) 1999 Precision Insight, Inc.
7  * Copyright (C) 1999 Xi Graphics, Inc.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice shall be included
17  * in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25  * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * TODO:
28  * - Allocate more than order 0 pages to avoid too much linear map splitting.
29  */
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/init.h>
33 #include <linux/pagemap.h>
34 #include <linux/miscdevice.h>
35 #include <linux/pm.h>
36 #include <linux/agp_backend.h>
37 #include <linux/vmalloc.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/mm.h>
40 #include <asm/io.h>
41 #include <asm/cacheflush.h>
42 #include <asm/pgtable.h>
43 #include "agp.h"
44 
45 __u32 *agp_gatt_table;
46 int agp_memory_reserved;
47 
48 /*
49  * Needed by the Nforce GART driver for the time being. Would be
50  * nice to do this some other way instead of needing this export.
51  */
52 EXPORT_SYMBOL_GPL(agp_memory_reserved);
53 
54 /*
55  * Generic routines for handling agp_memory structures -
56  * They use the basic page allocation routines to do the brunt of the work.
57  */
58 
59 void agp_free_key(int key)
60 {
61 	if (key < 0)
62 		return;
63 
64 	if (key < MAXKEY)
65 		clear_bit(key, agp_bridge->key_list);
66 }
67 EXPORT_SYMBOL(agp_free_key);
68 
69 
70 static int agp_get_key(void)
71 {
72 	int bit;
73 
74 	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
75 	if (bit < MAXKEY) {
76 		set_bit(bit, agp_bridge->key_list);
77 		return bit;
78 	}
79 	return -1;
80 }
81 
82 /*
83  * Use kmalloc if possible for the page list. Otherwise fall back to
84  * vmalloc. This speeds things up and also saves memory for small AGP
85  * regions.
86  */
87 
88 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
89 {
90 	mem->memory = NULL;
91 	mem->vmalloc_flag = 0;
92 
93 	if (size <= 2*PAGE_SIZE)
94 		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
95 	if (mem->memory == NULL) {
96 		mem->memory = vmalloc(size);
97 		mem->vmalloc_flag = 1;
98 	}
99 }
100 EXPORT_SYMBOL(agp_alloc_page_array);
101 
102 void agp_free_page_array(struct agp_memory *mem)
103 {
104 	if (mem->vmalloc_flag) {
105 		vfree(mem->memory);
106 	} else {
107 		kfree(mem->memory);
108 	}
109 }
110 EXPORT_SYMBOL(agp_free_page_array);
111 
112 
113 static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
114 {
115 	struct agp_memory *new;
116 	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
117 
118 	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
119 	if (new == NULL)
120 		return NULL;
121 
122 	new->key = agp_get_key();
123 
124 	if (new->key < 0) {
125 		kfree(new);
126 		return NULL;
127 	}
128 
129 	agp_alloc_page_array(alloc_size, new);
130 
131 	if (new->memory == NULL) {
132 		agp_free_key(new->key);
133 		kfree(new);
134 		return NULL;
135 	}
136 	new->num_scratch_pages = 0;
137 	return new;
138 }
139 
140 struct agp_memory *agp_create_memory(int scratch_pages)
141 {
142 	struct agp_memory *new;
143 
144 	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
145 	if (new == NULL)
146 		return NULL;
147 
148 	new->key = agp_get_key();
149 
150 	if (new->key < 0) {
151 		kfree(new);
152 		return NULL;
153 	}
154 
155 	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
156 
157 	if (new->memory == NULL) {
158 		agp_free_key(new->key);
159 		kfree(new);
160 		return NULL;
161 	}
162 	new->num_scratch_pages = scratch_pages;
163 	new->type = AGP_NORMAL_MEMORY;
164 	return new;
165 }
166 EXPORT_SYMBOL(agp_create_memory);
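/*
 * Example (illustrative, hypothetical driver code): a chipset driver that
 * implements its own alloc_by_type hook typically starts from
 * agp_create_memory() and fills in the page array itself, roughly:
 *
 *	struct agp_memory *new = agp_create_memory(1);
 *
 *	if (new == NULL)
 *		return NULL;
 *	new->type = type;
 *	... allocate the backing page(s) and record them in new->memory[] ...
 *	return new;
 */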
167 
168 /**
169  *	agp_free_memory - free memory associated with an agp_memory pointer.
170  *
171  *	@curr:		agp_memory pointer to be freed.
172  *
173  *	It is the only function that can be called when the backend is not owned
174  *	by the caller.  (So it can free memory on client death.)
175  */
176 void agp_free_memory(struct agp_memory *curr)
177 {
178 	size_t i;
179 
180 	if (curr == NULL)
181 		return;
182 
183 	if (curr->is_bound == TRUE)
184 		agp_unbind_memory(curr);
185 
186 	if (curr->type >= AGP_USER_TYPES) {
187 		agp_generic_free_by_type(curr);
188 		return;
189 	}
190 
191 	if (curr->type != 0) {
192 		curr->bridge->driver->free_by_type(curr);
193 		return;
194 	}
195 	if (curr->page_count != 0) {
196 		for (i = 0; i < curr->page_count; i++) {
197 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
198 		}
199 		flush_agp_mappings();
200 	}
201 	agp_free_key(curr->key);
202 	agp_free_page_array(curr);
203 	kfree(curr);
204 }
205 EXPORT_SYMBOL(agp_free_memory);
206 
207 #define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
208 
209 /**
210  *	agp_allocate_memory  -  allocate a group of pages of a certain type.
211  *
212  *	@page_count:	size_t argument of the number of pages
213  *	@type:	u32 argument of the type of memory to be allocated.
214  *
215  *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY, which
216  *	maps to physical RAM.  Any other type is device dependent.
217  *
218  *	It returns NULL whenever memory is unavailable.
219  */
220 struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
221 					size_t page_count, u32 type)
222 {
223 	int scratch_pages;
224 	struct agp_memory *new;
225 	size_t i;
226 
227 	if (!bridge)
228 		return NULL;
229 
230 	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
231 		return NULL;
232 
233 	if (type >= AGP_USER_TYPES) {
234 		new = agp_generic_alloc_user(page_count, type);
235 		if (new)
236 			new->bridge = bridge;
237 		return new;
238 	}
239 
240 	if (type != 0) {
241 		new = bridge->driver->alloc_by_type(page_count, type);
242 		if (new)
243 			new->bridge = bridge;
244 		return new;
245 	}
246 
247 	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
248 
249 	new = agp_create_memory(scratch_pages);
250 
251 	if (new == NULL)
252 		return NULL;
253 
254 	for (i = 0; i < page_count; i++) {
255 		void *addr = bridge->driver->agp_alloc_page(bridge);
256 
257 		if (addr == NULL) {
258 			agp_free_memory(new);
259 			return NULL;
260 		}
261 		new->memory[i] = virt_to_gart(addr);
262 		new->page_count++;
263 	}
264 	new->bridge = bridge;
265 
266 	flush_agp_mappings();
267 
268 	return new;
269 }
270 EXPORT_SYMBOL(agp_allocate_memory);
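/*
 * Example (illustrative sketch, not part of the driver): a client such as
 * the agpgart ioctl layer or a DRM driver allocates and releases memory
 * roughly like this.  The bridge pointer is assumed to come from
 * agp_backend_acquire() (declared in <linux/agp_backend.h>); the page
 * count here is arbitrary.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	...
 *	agp_free_memory(mem);
 */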
271 
272 
273 /* End - Generic routines for handling agp_memory structures */
274 
275 
276 static int agp_return_size(void)
277 {
278 	int current_size;
279 	void *temp;
280 
281 	temp = agp_bridge->current_size;
282 
283 	switch (agp_bridge->driver->size_type) {
284 	case U8_APER_SIZE:
285 		current_size = A_SIZE_8(temp)->size;
286 		break;
287 	case U16_APER_SIZE:
288 		current_size = A_SIZE_16(temp)->size;
289 		break;
290 	case U32_APER_SIZE:
291 		current_size = A_SIZE_32(temp)->size;
292 		break;
293 	case LVL2_APER_SIZE:
294 		current_size = A_SIZE_LVL2(temp)->size;
295 		break;
296 	case FIXED_APER_SIZE:
297 		current_size = A_SIZE_FIX(temp)->size;
298 		break;
299 	default:
300 		current_size = 0;
301 		break;
302 	}
303 
304 	current_size -= (agp_memory_reserved / (1024*1024));
305 	if (current_size < 0)
306 		current_size = 0;
307 	return current_size;
308 }
309 
310 
311 int agp_num_entries(void)
312 {
313 	int num_entries;
314 	void *temp;
315 
316 	temp = agp_bridge->current_size;
317 
318 	switch (agp_bridge->driver->size_type) {
319 	case U8_APER_SIZE:
320 		num_entries = A_SIZE_8(temp)->num_entries;
321 		break;
322 	case U16_APER_SIZE:
323 		num_entries = A_SIZE_16(temp)->num_entries;
324 		break;
325 	case U32_APER_SIZE:
326 		num_entries = A_SIZE_32(temp)->num_entries;
327 		break;
328 	case LVL2_APER_SIZE:
329 		num_entries = A_SIZE_LVL2(temp)->num_entries;
330 		break;
331 	case FIXED_APER_SIZE:
332 		num_entries = A_SIZE_FIX(temp)->num_entries;
333 		break;
334 	default:
335 		num_entries = 0;
336 		break;
337 	}
338 
339 	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
340 	if (num_entries < 0)
341 		num_entries = 0;
342 	return num_entries;
343 }
344 EXPORT_SYMBOL_GPL(agp_num_entries);
345 
346 
347 /**
348  *	agp_copy_info  -  copy bridge state information
349  *
350  *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
351  *
352  *	This function copies information about the agp bridge device and the state of
353  *	the agp backend into an agp_kern_info pointer.
354  */
355 int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
356 {
357 	memset(info, 0, sizeof(struct agp_kern_info));
358 	if (!bridge) {
359 		info->chipset = NOT_SUPPORTED;
360 		return -EIO;
361 	}
362 
363 	info->version.major = bridge->version->major;
364 	info->version.minor = bridge->version->minor;
365 	info->chipset = SUPPORTED;
366 	info->device = bridge->dev;
367 	if (bridge->mode & AGPSTAT_MODE_3_0)
368 		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
369 	else
370 		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
371 	info->aper_base = bridge->gart_bus_addr;
372 	info->aper_size = agp_return_size();
373 	info->max_memory = bridge->max_memory_agp;
374 	info->current_memory = atomic_read(&bridge->current_memory_agp);
375 	info->cant_use_aperture = bridge->driver->cant_use_aperture;
376 	info->vm_ops = bridge->vm_ops;
377 	info->page_mask = ~0UL;
378 	return 0;
379 }
380 EXPORT_SYMBOL(agp_copy_info);
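/*
 * Example (illustrative): how a client, e.g. the DRM AGP layer, might read
 * the bridge state.  The bridge pointer and error handling are assumed to
 * be supplied by the caller.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0)
 *		return -EIO;
 *	printk(KERN_DEBUG PFX "aperture at 0x%lx, %luMB\n",
 *	       info.aper_base, (unsigned long)info.aper_size);
 */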
381 
382 /* End - Routine to copy over information structure */
383 
384 /*
385  * Routines for handling swapping of agp_memory into the GATT -
386  * These routines take agp_memory and insert them into the GATT.
387  * They call device specific routines to actually write to the GATT.
388  */
389 
390 /**
391  *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
392  *
393  *	@curr:		agp_memory pointer
394  *	@pg_start:	an offset into the graphics aperture translation table
395  *
396  *	It returns -EINVAL if the pointer == NULL.
397  *	It returns -EBUSY if the area of the table requested is already in use.
398  */
399 int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
400 {
401 	int ret_val;
402 
403 	if (curr == NULL)
404 		return -EINVAL;
405 
406 	if (curr->is_bound == TRUE) {
407 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
408 		return -EINVAL;
409 	}
410 	if (curr->is_flushed == FALSE) {
411 		curr->bridge->driver->cache_flush();
412 		curr->is_flushed = TRUE;
413 	}
414 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
415 
416 	if (ret_val != 0)
417 		return ret_val;
418 
419 	curr->is_bound = TRUE;
420 	curr->pg_start = pg_start;
421 	return 0;
422 }
423 EXPORT_SYMBOL(agp_bind_memory);
424 
425 
426 /**
427  *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
428  *
429  * @curr:	agp_memory pointer to be removed from the GATT.
430  *
431  * It returns -EINVAL if this piece of agp_memory is not currently bound to
432  * the graphics aperture translation table or if the agp_memory pointer == NULL.
433  */
434 int agp_unbind_memory(struct agp_memory *curr)
435 {
436 	int ret_val;
437 
438 	if (curr == NULL)
439 		return -EINVAL;
440 
441 	if (curr->is_bound != TRUE) {
442 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
443 		return -EINVAL;
444 	}
445 
446 	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
447 
448 	if (ret_val != 0)
449 		return ret_val;
450 
451 	curr->is_bound = FALSE;
452 	curr->pg_start = 0;
453 	return 0;
454 }
455 EXPORT_SYMBOL(agp_unbind_memory);
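/*
 * Example (illustrative sketch): the usual life cycle of a piece of
 * agp_memory as seen by a client.  pg_start is an offset, in aperture
 * pages, chosen by the caller; npages is the number of pages wanted.
 * Note that agp_free_memory() unbinds the memory itself if it is still
 * bound when called.
 *
 *	mem = agp_allocate_memory(bridge, npages, AGP_NORMAL_MEMORY);
 *	if (mem && agp_bind_memory(mem, pg_start) == 0) {
 *		... use the GATT-mapped pages ...
 *		agp_unbind_memory(mem);
 *	}
 *	agp_free_memory(mem);
 */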
456 
457 /* End - Routines for handling swapping of agp_memory into the GATT */
458 
459 
460 /* Generic Agp routines - Start */
461 static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
462 {
463 	u32 tmp;
464 
465 	if (*requested_mode & AGP2_RESERVED_MASK) {
466 		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
467 			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
468 		*requested_mode &= ~AGP2_RESERVED_MASK;
469 	}
470 
471 	/*
472 	 * Some dumb bridges are programmed to disobey the AGP2 spec.
473 	 * This is likely BIOS misprogramming rather than a power-on default, or
474 	 * it would be a lot more common.
475 	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
476 	 * AGPv2 spec 6.1.9 states:
477 	 *   The RATE field indicates the data transfer rates supported by this
478 	 *   device. A.G.P. devices must report all that apply.
479 	 * Fix them up as best we can.
480 	 */
481 	switch (*bridge_agpstat & 7) {
482 	case 4:
483 		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
484 		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
485 			"Fixing up support for x2 & x1\n");
486 		break;
487 	case 2:
488 		*bridge_agpstat |= AGPSTAT2_1X;
489 		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
490 			"Fixing up support for x1\n");
491 		break;
492 	default:
493 		break;
494 	}
495 
496 	/* Check that the speed bits make sense. Only one should be set. */
497 	tmp = *requested_mode & 7;
498 	switch (tmp) {
499 	case 0:
500 		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
501 		*requested_mode |= AGPSTAT2_1X;
502 		break;
503 	case 1:
504 	case 2:
505 		break;
506 	case 3:
507 		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
508 		break;
509 	case 4:
510 		break;
511 	case 5:
512 	case 6:
513 	case 7:
514 		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
515 		break;
516 	}
517 
518 	/* disable SBA if it's not supported */
519 	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
520 		*bridge_agpstat &= ~AGPSTAT_SBA;
521 
522 	/* Set rate */
523 	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
524 		*bridge_agpstat &= ~AGPSTAT2_4X;
525 
526 	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
527 		*bridge_agpstat &= ~AGPSTAT2_2X;
528 
529 	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
530 		*bridge_agpstat &= ~AGPSTAT2_1X;
531 
532 	/* Now we know what mode it should be, clear out the unwanted bits. */
533 	if (*bridge_agpstat & AGPSTAT2_4X)
534 		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
535 
536 	if (*bridge_agpstat & AGPSTAT2_2X)
537 		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
538 
539 	if (*bridge_agpstat & AGPSTAT2_1X)
540 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
541 
542 	/* Apply any errata. */
543 	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
544 		*bridge_agpstat &= ~AGPSTAT_FW;
545 
546 	if (agp_bridge->flags & AGP_ERRATA_SBA)
547 		*bridge_agpstat &= ~AGPSTAT_SBA;
548 
549 	if (agp_bridge->flags & AGP_ERRATA_1X) {
550 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
551 		*bridge_agpstat |= AGPSTAT2_1X;
552 	}
553 
554 	/* If we've dropped down to 1X, disable fast writes. */
555 	if (*bridge_agpstat & AGPSTAT2_1X)
556 		*bridge_agpstat &= ~AGPSTAT_FW;
557 }
558 
559 /*
560  * requested_mode = Mode requested by (typically) X.
561  * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
562  * vga_agpstat = PCI_AGP_STATUS from graphic card.
563  */
564 static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
565 {
566 	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
567 	u32 tmp;
568 
569 	if (*requested_mode & AGP3_RESERVED_MASK) {
570 		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
571 			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
572 		*requested_mode &= ~AGP3_RESERVED_MASK;
573 	}
574 
575 	/* Check that the speed bits make sense. */
576 	tmp = *requested_mode & 7;
577 	if (tmp == 0) {
578 		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
579 		*requested_mode |= AGPSTAT3_4X;
580 	}
581 	if (tmp >= 3) {
582 		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
583 		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
584 	}
585 
586 	/* ARQSZ - Set the value to the maximum one.
587 	 * Don't allow the mode register to override values. */
588 	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
589 		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
590 
591 	/* Calibration cycle.
592 	 * Don't allow the mode register to override values. */
593 	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
594 		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
595 
596 	/* SBA *must* be supported for AGP v3 */
597 	*bridge_agpstat |= AGPSTAT_SBA;
598 
599 	/*
600 	 * Set speed.
601 	 * Check for invalid speeds. This can happen when applications
602 	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
603 	 */
604 	if (*requested_mode & AGPSTAT_MODE_3_0) {
605 		/*
606 		 * The caller doesn't know what it is doing. The bridge is in 3.0 mode
607 		 * and we have been passed a 3.0 mode, but with 2.x speed bits set.
608 		 * AGP2.x 4x -> AGP3.0 4x.
609 		 */
610 		if (*requested_mode & AGPSTAT2_4X) {
611 			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
612 						current->comm, *requested_mode);
613 			*requested_mode &= ~AGPSTAT2_4X;
614 			*requested_mode |= AGPSTAT3_4X;
615 		}
616 	} else {
617 		/*
618 		 * The caller doesn't know what they are doing. We are in 3.0 mode,
619 		 * but have been passed an AGP 2.x mode.
620 		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
621 		 */
622 		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
623 					current->comm, *requested_mode);
624 		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
625 		*requested_mode |= AGPSTAT3_4X;
626 	}
627 
628 	if (*requested_mode & AGPSTAT3_8X) {
629 		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
630 			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
631 			*bridge_agpstat |= AGPSTAT3_4X;
632 			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
633 			return;
634 		}
635 		if (!(*vga_agpstat & AGPSTAT3_8X)) {
636 			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
637 			*bridge_agpstat |= AGPSTAT3_4X;
638 			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
639 			return;
640 		}
641 		/* All set, bridge & device can do AGP x8*/
642 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
643 		goto done;
644 
645 	} else if (*requested_mode & AGPSTAT3_4X) {
646 		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
647 		*bridge_agpstat |= AGPSTAT3_4X;
648 		goto done;
649 
650 	} else {
651 
652 		/*
653 		 * If no AGP mode was specified, we check whether both
654 		 * the graphics card and the bridge can do x8, and use it if so.
655 		 * If not, we fall back to x4 mode.
656 		 */
657 		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
658 			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
659 				"supported by bridge & card (x8).\n");
660 			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
661 			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
662 		} else {
663 			printk(KERN_INFO PFX "Fell back to AGPx4 mode because:\n");
664 			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
665 				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
666 					*bridge_agpstat, origbridge);
667 				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
668 				*bridge_agpstat |= AGPSTAT3_4X;
669 			}
670 			if (!(*vga_agpstat & AGPSTAT3_8X)) {
671 				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
672 					*vga_agpstat, origvga);
673 				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
674 				*vga_agpstat |= AGPSTAT3_4X;
675 			}
676 		}
677 	}
678 
679 done:
680 	/* Apply any errata. */
681 	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
682 		*bridge_agpstat &= ~AGPSTAT_FW;
683 
684 	if (agp_bridge->flags & AGP_ERRATA_SBA)
685 		*bridge_agpstat &= ~AGPSTAT_SBA;
686 
687 	if (agp_bridge->flags & AGP_ERRATA_1X) {
688 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
689 		*bridge_agpstat |= AGPSTAT2_1X;
690 	}
691 }
692 
693 
694 /**
695  * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
696  * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
697  * @requested_mode: requested agp_stat from userspace (typically from X)
698  * @bridge_agpstat: current agp_stat from AGP bridge.
699  *
700  * This function will hunt for an AGP graphics card, and try to match
701  * the requested mode to the capabilities of both the bridge and the card.
702  */
703 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
704 {
705 	struct pci_dev *device = NULL;
706 	u32 vga_agpstat;
707 	u8 cap_ptr;
708 
709 	for (;;) {
710 		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
711 		if (!device) {
712 			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
713 			return 0;
714 		}
715 		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
716 		if (cap_ptr)
717 			break;
718 	}
719 
720 	/*
721 	 * OK, here we have an AGP device. Disable impossible
722 	 * settings, and adjust the read queue to the minimum.
723 	 */
724 	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
725 
726 	/* adjust RQ depth */
727 	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
728 	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
729 		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
730 
731 	/* disable FW if it's not supported */
732 	if (!((bridge_agpstat & AGPSTAT_FW) &&
733 		 (vga_agpstat & AGPSTAT_FW) &&
734 		 (requested_mode & AGPSTAT_FW)))
735 		bridge_agpstat &= ~AGPSTAT_FW;
736 
737 	/* Check to see if we are operating in 3.0 mode */
738 	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
739 		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
740 	else
741 		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
742 
743 	pci_dev_put(device);
744 	return bridge_agpstat;
745 }
746 EXPORT_SYMBOL(agp_collect_device_status);
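/*
 * Example (illustrative): a chipset driver that provides its own agp_enable
 * hook would normally use this helper the same way agp_generic_enable()
 * does further down; roughly:
 *
 *	pci_read_config_dword(bridge->dev, bridge->capndx + PCI_AGP_STATUS,
 *			      &bridge_agpstat);
 *	bridge_agpstat = agp_collect_device_status(bridge, mode, bridge_agpstat);
 *	if (bridge_agpstat != 0) {
 *		bridge_agpstat |= AGPSTAT_AGP_ENABLE;
 *		agp_device_command(bridge_agpstat, FALSE);
 *	}
 */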
747 
748 
749 void agp_device_command(u32 bridge_agpstat, int agp_v3)
750 {
751 	struct pci_dev *device = NULL;
752 	int mode;
753 
754 	mode = bridge_agpstat & 0x7;
755 	if (agp_v3)
756 		mode *= 4;
757 
758 	for_each_pci_dev(device) {
759 		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
760 		if (!agp)
761 			continue;
762 
763 		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
764 				agp_v3 ? 3 : 2, pci_name(device), mode);
765 		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
766 	}
767 }
768 EXPORT_SYMBOL(agp_device_command);
769 
770 
771 void get_agp_version(struct agp_bridge_data *bridge)
772 {
773 	u32 ncapid;
774 
775 	/* Exit early if already set by errata workarounds. */
776 	if (bridge->major_version != 0)
777 		return;
778 
779 	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
780 	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
781 	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
782 }
783 EXPORT_SYMBOL(get_agp_version);
784 
785 
786 void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
787 {
788 	u32 bridge_agpstat, temp;
789 
790 	get_agp_version(agp_bridge);
791 
792 	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
793 				agp_bridge->major_version,
794 				agp_bridge->minor_version,
795 				pci_name(agp_bridge->dev));
796 
797 	pci_read_config_dword(agp_bridge->dev,
798 		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
799 
800 	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
801 	if (bridge_agpstat == 0)
802 		/* Something bad happened. FIXME: Return error code? */
803 		return;
804 
805 	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
806 
807 	/* Do AGP version specific frobbing. */
808 	if (bridge->major_version >= 3) {
809 		if (bridge->mode & AGPSTAT_MODE_3_0) {
810 			/* If we have 3.5, we can do the isoch stuff. */
811 			if (bridge->minor_version >= 5)
812 				agp_3_5_enable(bridge);
813 			agp_device_command(bridge_agpstat, TRUE);
814 			return;
815 		} else {
816 		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
817 		    bridge_agpstat &= ~(7<<10);
818 		    pci_read_config_dword(bridge->dev,
819 					bridge->capndx+AGPCTRL, &temp);
820 		    temp |= (1<<9);
821 		    pci_write_config_dword(bridge->dev,
822 					bridge->capndx+AGPCTRL, temp);
823 
824 		    printk(KERN_INFO PFX "Device is in legacy mode,"
825 				" falling back to 2.x\n");
826 		}
827 	}
828 
829 	/* AGP v<3 */
830 	agp_device_command(bridge_agpstat, FALSE);
831 }
832 EXPORT_SYMBOL(agp_generic_enable);
833 
834 
835 int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
836 {
837 	char *table;
838 	char *table_end;
839 	int size;
840 	int page_order;
841 	int num_entries;
842 	int i;
843 	void *temp;
844 	struct page *page;
845 
846 	/* The generic routines can't handle 2 level gatt's */
847 	if (bridge->driver->size_type == LVL2_APER_SIZE)
848 		return -EINVAL;
849 
850 	table = NULL;
851 	i = bridge->aperture_size_idx;
852 	temp = bridge->current_size;
853 	size = page_order = num_entries = 0;
854 
855 	if (bridge->driver->size_type != FIXED_APER_SIZE) {
856 		do {
857 			switch (bridge->driver->size_type) {
858 			case U8_APER_SIZE:
859 				size = A_SIZE_8(temp)->size;
860 				page_order =
861 				    A_SIZE_8(temp)->page_order;
862 				num_entries =
863 				    A_SIZE_8(temp)->num_entries;
864 				break;
865 			case U16_APER_SIZE:
866 				size = A_SIZE_16(temp)->size;
867 				page_order = A_SIZE_16(temp)->page_order;
868 				num_entries = A_SIZE_16(temp)->num_entries;
869 				break;
870 			case U32_APER_SIZE:
871 				size = A_SIZE_32(temp)->size;
872 				page_order = A_SIZE_32(temp)->page_order;
873 				num_entries = A_SIZE_32(temp)->num_entries;
874 				break;
875 				/* This case will never really happen. */
876 			case FIXED_APER_SIZE:
877 			case LVL2_APER_SIZE:
878 			default:
879 				size = page_order = num_entries = 0;
880 				break;
881 			}
882 
883 			table = alloc_gatt_pages(page_order);
884 
885 			if (table == NULL) {
886 				i++;
887 				switch (bridge->driver->size_type) {
888 				case U8_APER_SIZE:
889 					bridge->current_size = A_IDX8(bridge);
890 					break;
891 				case U16_APER_SIZE:
892 					bridge->current_size = A_IDX16(bridge);
893 					break;
894 				case U32_APER_SIZE:
895 					bridge->current_size = A_IDX32(bridge);
896 					break;
897 				/* These cases will never really happen. */
898 				case FIXED_APER_SIZE:
899 				case LVL2_APER_SIZE:
900 				default:
901 					break;
902 				}
903 				temp = bridge->current_size;
904 			} else {
905 				bridge->aperture_size_idx = i;
906 			}
907 		} while (!table && (i < bridge->driver->num_aperture_sizes));
908 	} else {
909 		size = ((struct aper_size_info_fixed *) temp)->size;
910 		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
911 		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
912 		table = alloc_gatt_pages(page_order);
913 	}
914 
915 	if (table == NULL)
916 		return -ENOMEM;
917 
918 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
919 
920 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
921 		SetPageReserved(page);
922 
923 	bridge->gatt_table_real = (u32 *) table;
924 	agp_gatt_table = (void *)table;
925 
926 	bridge->driver->cache_flush();
927 	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
928 					(PAGE_SIZE * (1 << page_order)));
929 	bridge->driver->cache_flush();
930 
931 	if (bridge->gatt_table == NULL) {
932 		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
933 			ClearPageReserved(page);
934 
935 		free_gatt_pages(table, page_order);
936 
937 		return -ENOMEM;
938 	}
939 	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
940 
941 	/* AK: bogus, should encode addresses > 4GB */
942 	for (i = 0; i < num_entries; i++) {
943 		writel(bridge->scratch_page, bridge->gatt_table+i);
944 		readl(bridge->gatt_table+i);	/* PCI Posting. */
945 	}
946 
947 	return 0;
948 }
949 EXPORT_SYMBOL(agp_generic_create_gatt_table);
950 
951 int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
952 {
953 	int page_order;
954 	char *table, *table_end;
955 	void *temp;
956 	struct page *page;
957 
958 	temp = bridge->current_size;
959 
960 	switch (bridge->driver->size_type) {
961 	case U8_APER_SIZE:
962 		page_order = A_SIZE_8(temp)->page_order;
963 		break;
964 	case U16_APER_SIZE:
965 		page_order = A_SIZE_16(temp)->page_order;
966 		break;
967 	case U32_APER_SIZE:
968 		page_order = A_SIZE_32(temp)->page_order;
969 		break;
970 	case FIXED_APER_SIZE:
971 		page_order = A_SIZE_FIX(temp)->page_order;
972 		break;
973 	case LVL2_APER_SIZE:
974 		/* The generic routines can't deal with 2 level gatt's */
975 		return -EINVAL;
976 		break;
977 	default:
978 		page_order = 0;
979 		break;
980 	}
981 
982 	/* Do not worry about freeing memory, because if this is
983 	 * called, then all agp memory is deallocated and removed
984 	 * from the table. */
985 
986 	iounmap(bridge->gatt_table);
987 	table = (char *) bridge->gatt_table_real;
988 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
989 
990 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
991 		ClearPageReserved(page);
992 
993 	free_gatt_pages(bridge->gatt_table_real, page_order);
994 
995 	agp_gatt_table = NULL;
996 	bridge->gatt_table = NULL;
997 	bridge->gatt_table_real = NULL;
998 	bridge->gatt_bus_addr = 0;
999 
1000 	return 0;
1001 }
1002 EXPORT_SYMBOL(agp_generic_free_gatt_table);
1003 
1004 
1005 int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1006 {
1007 	int num_entries;
1008 	size_t i;
1009 	off_t j;
1010 	void *temp;
1011 	struct agp_bridge_data *bridge;
1012 	int mask_type;
1013 
1014 	bridge = mem->bridge;
1015 	if (!bridge)
1016 		return -EINVAL;
1017 
1018 	if (mem->page_count == 0)
1019 		return 0;
1020 
1021 	temp = bridge->current_size;
1022 
1023 	switch (bridge->driver->size_type) {
1024 	case U8_APER_SIZE:
1025 		num_entries = A_SIZE_8(temp)->num_entries;
1026 		break;
1027 	case U16_APER_SIZE:
1028 		num_entries = A_SIZE_16(temp)->num_entries;
1029 		break;
1030 	case U32_APER_SIZE:
1031 		num_entries = A_SIZE_32(temp)->num_entries;
1032 		break;
1033 	case FIXED_APER_SIZE:
1034 		num_entries = A_SIZE_FIX(temp)->num_entries;
1035 		break;
1036 	case LVL2_APER_SIZE:
1037 		/* The generic routines can't deal with 2 level gatt's */
1038 		return -EINVAL;
1039 		break;
1040 	default:
1041 		num_entries = 0;
1042 		break;
1043 	}
1044 
1045 	num_entries -= agp_memory_reserved / PAGE_SIZE;
1046 	if (num_entries < 0)
		num_entries = 0;
1047 
1048 	if (type != mem->type)
1049 		return -EINVAL;
1050 
1051 	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1052 	if (mask_type != 0) {
1053 		/* The generic routines know nothing of memory types */
1054 		return -EINVAL;
1055 	}
1056 
1057 	/* AK: could wrap */
1058 	if ((pg_start + mem->page_count) > num_entries)
1059 		return -EINVAL;
1060 
1061 	j = pg_start;
1062 
1063 	while (j < (pg_start + mem->page_count)) {
1064 		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
1065 			return -EBUSY;
1066 		j++;
1067 	}
1068 
1069 	if (mem->is_flushed == FALSE) {
1070 		bridge->driver->cache_flush();
1071 		mem->is_flushed = TRUE;
1072 	}
1073 
1074 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1075 		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
1076 		       bridge->gatt_table+j);
1077 	}
1078 	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
1079 
1080 	bridge->driver->tlb_flush(mem);
1081 	return 0;
1082 }
1083 EXPORT_SYMBOL(agp_generic_insert_memory);
1084 
1085 
1086 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1087 {
1088 	size_t i;
1089 	struct agp_bridge_data *bridge;
1090 	int mask_type;
1091 
1092 	bridge = mem->bridge;
1093 	if (!bridge)
1094 		return -EINVAL;
1095 
1096 	if (mem->page_count == 0)
1097 		return 0;
1098 
1099 	if (type != mem->type)
1100 		return -EINVAL;
1101 
1102 	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1103 	if (mask_type != 0) {
1104 		/* The generic routines know nothing of memory types */
1105 		return -EINVAL;
1106 	}
1107 
1108 	/* AK: bogus, should encode addresses > 4GB */
1109 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1110 		writel(bridge->scratch_page, bridge->gatt_table+i);
1111 	}
1112 	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
1113 
1114 	bridge->driver->tlb_flush(mem);
1115 	return 0;
1116 }
1117 EXPORT_SYMBOL(agp_generic_remove_memory);
1118 
1119 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
1120 {
1121 	return NULL;
1122 }
1123 EXPORT_SYMBOL(agp_generic_alloc_by_type);
1124 
1125 void agp_generic_free_by_type(struct agp_memory *curr)
1126 {
1127 	agp_free_page_array(curr);
1128 	agp_free_key(curr->key);
1129 	kfree(curr);
1130 }
1131 EXPORT_SYMBOL(agp_generic_free_by_type);
1132 
1133 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
1134 {
1135 	struct agp_memory *new;
1136 	int i;
1137 	int pages;
1138 
1139 	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
1140 	new = agp_create_user_memory(page_count);
1141 	if (new == NULL)
1142 		return NULL;
1143 
1144 	for (i = 0; i < page_count; i++)
1145 		new->memory[i] = 0;
1146 	new->page_count = 0;
1147 	new->type = type;
1148 	new->num_scratch_pages = pages;
1149 
1150 	return new;
1151 }
1152 EXPORT_SYMBOL(agp_generic_alloc_user);
1153 
1154 /*
1155  * Basic Page Allocation Routines -
1156  * These routines handle page allocation and by default they reserve the allocated
1157  * memory.  They also handle incrementing the current_memory_agp value, which is checked
1158  * against a maximum value.
1159  */
1160 
1161 void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1162 {
1163 	struct page * page;
1164 
1165 	page = alloc_page(GFP_KERNEL | GFP_DMA32);
1166 	if (page == NULL)
1167 		return NULL;
1168 
1169 	map_page_into_agp(page);
1170 
1171 	get_page(page);
1172 	SetPageLocked(page);
1173 	atomic_inc(&agp_bridge->current_memory_agp);
1174 	return page_address(page);
1175 }
1176 EXPORT_SYMBOL(agp_generic_alloc_page);
1177 
1178 
1179 void agp_generic_destroy_page(void *addr)
1180 {
1181 	struct page *page;
1182 
1183 	if (addr == NULL)
1184 		return;
1185 
1186 	page = virt_to_page(addr);
1187 	unmap_page_from_agp(page);
1188 	put_page(page);
1189 	unlock_page(page);
1190 	free_page((unsigned long)addr);
1191 	atomic_dec(&agp_bridge->current_memory_agp);
1192 }
1193 EXPORT_SYMBOL(agp_generic_destroy_page);
1194 
1195 /* End Basic Page Allocation Routines */
1196 
1197 
1198 /**
1199  * agp_enable  -  initialise the agp point-to-point connection.
1200  *
1201  * @mode:	agp mode register value to configure with.
1202  */
1203 void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1204 {
1205 	if (!bridge)
1206 		return;
1207 	bridge->driver->agp_enable(bridge, mode);
1208 }
1209 EXPORT_SYMBOL(agp_enable);
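/*
 * Example (illustrative): a DRM driver would typically acquire the backend
 * for its device and then enable the link with the mode requested by
 * userspace.  agp_backend_acquire()/agp_backend_release() are declared in
 * <linux/agp_backend.h>; pdev and mode are assumed to be supplied by the
 * caller.
 *
 *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
 *
 *	if (bridge) {
 *		agp_enable(bridge, mode);
 *		...
 *		agp_backend_release(bridge);
 *	}
 */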
1210 
1211 /* When we remove the global variable agp_bridge from all drivers,
1212  * then agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
1213  */
1214 
1215 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1216 {
1217 	if (list_empty(&agp_bridges))
1218 		return NULL;
1219 
1220 	return agp_bridge;
1221 }
1222 
1223 static void ipi_handler(void *null)
1224 {
1225 	flush_agp_cache();
1226 }
1227 
1228 void global_cache_flush(void)
1229 {
1230 	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
1231 		panic(PFX "timed out waiting for the other CPUs!\n");
1232 }
1233 EXPORT_SYMBOL(global_cache_flush);
1234 
1235 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1236 	unsigned long addr, int type)
1237 {
1238 	/* memory type is ignored in the generic routine */
1239 	if (bridge->driver->masks)
1240 		return addr | bridge->driver->masks[0].mask;
1241 	else
1242 		return addr;
1243 }
1244 EXPORT_SYMBOL(agp_generic_mask_memory);
1245 
1246 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
1247 				  int type)
1248 {
1249 	if (type >= AGP_USER_TYPES)
1250 		return 0;
1251 	return type;
1252 }
1253 EXPORT_SYMBOL(agp_generic_type_to_mask_type);
1254 
1255 /*
1256  * These functions are implemented according to the AGPv3 spec,
1257  * which covers implementation details that had previously been
1258  * left open.
1259  */
1260 
1261 int agp3_generic_fetch_size(void)
1262 {
1263 	u16 temp_size;
1264 	int i;
1265 	struct aper_size_info_16 *values;
1266 
1267 	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1268 	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1269 
1270 	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1271 		if (temp_size == values[i].size_value) {
1272 			agp_bridge->previous_size =
1273 				agp_bridge->current_size = (void *) (values + i);
1274 
1275 			agp_bridge->aperture_size_idx = i;
1276 			return values[i].size;
1277 		}
1278 	}
1279 	return 0;
1280 }
1281 EXPORT_SYMBOL(agp3_generic_fetch_size);
1282 
1283 void agp3_generic_tlbflush(struct agp_memory *mem)
1284 {
1285 	u32 ctrl;
1286 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1287 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1288 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1289 }
1290 EXPORT_SYMBOL(agp3_generic_tlbflush);
1291 
1292 int agp3_generic_configure(void)
1293 {
1294 	u32 temp;
1295 	struct aper_size_info_16 *current_size;
1296 
1297 	current_size = A_SIZE_16(agp_bridge->current_size);
1298 
1299 	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1300 	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1301 
1302 	/* set aperture size */
1303 	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1304 	/* set gart pointer */
1305 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1306 	/* enable aperture and GTLB */
1307 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1308 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1309 	return 0;
1310 }
1311 EXPORT_SYMBOL(agp3_generic_configure);
1312 
1313 void agp3_generic_cleanup(void)
1314 {
1315 	u32 ctrl;
1316 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1317 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1318 }
1319 EXPORT_SYMBOL(agp3_generic_cleanup);
1320 
1321 const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1322 {
1323 	{4096, 1048576, 10, 0x000},
1324 	{2048,  524288, 9, 0x800},
1325 	{1024,  262144, 8, 0xc00},
1326 	{ 512,  131072, 7, 0xe00},
1327 	{ 256,   65536, 6, 0xf00},
1328 	{ 128,   32768, 5, 0xf20},
1329 	{  64,   16384, 4, 0xf30},
1330 	{  32,    8192, 3, 0xf38},
1331 	{  16,    4096, 2, 0xf3c},
1332 	{   8,    2048, 1, 0xf3e},
1333 	{   4,    1024, 0, 0xf3f}
1334 };
1335 EXPORT_SYMBOL(agp3_generic_sizes);
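/*
 * Example (illustrative sketch, hypothetical names): a simple AGPv3 chipset
 * driver can reuse the generic AGP3 helpers and the size table above when
 * filling in its agp_bridge_driver, roughly:
 *
 *	static struct agp_bridge_driver example_agp3_driver = {
 *		.owner			= THIS_MODULE,
 *		.aperture_sizes		= (void *) agp3_generic_sizes,
 *		.size_type		= U16_APER_SIZE,
 *		.num_aperture_sizes	= AGP_GENERIC_SIZES_ENTRIES,
 *		.configure		= agp3_generic_configure,
 *		.fetch_size		= agp3_generic_fetch_size,
 *		.cleanup		= agp3_generic_cleanup,
 *		.tlb_flush		= agp3_generic_tlbflush,
 *		...
 *	};
 */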
1336 
1337