xref: /linux/drivers/char/agp/generic.c (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1)
1 /*
2  * AGPGART driver.
3  * Copyright (C) 2004 Silicon Graphics, Inc.
4  * Copyright (C) 2002-2005 Dave Jones.
5  * Copyright (C) 1999 Jeff Hartmann.
6  * Copyright (C) 1999 Precision Insight, Inc.
7  * Copyright (C) 1999 Xi Graphics, Inc.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice shall be included
17  * in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25  * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * TODO:
28  * - Allocate more than order 0 pages to avoid too much linear map splitting.
29  */
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/init.h>
33 #include <linux/pagemap.h>
34 #include <linux/miscdevice.h>
35 #include <linux/pm.h>
36 #include <linux/agp_backend.h>
37 #include <linux/vmalloc.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/mm.h>
40 #include <asm/io.h>
41 #include <asm/cacheflush.h>
42 #include <asm/pgtable.h>
43 #include "agp.h"
44 
45 __u32 *agp_gatt_table;
46 int agp_memory_reserved;
47 
48 /*
49  * Needed by the Nforce GART driver for the time being. Would be
50  * nice to do this some other way instead of needing this export.
51  */
52 EXPORT_SYMBOL_GPL(agp_memory_reserved);
53 
54 #if defined(CONFIG_X86)
55 int map_page_into_agp(struct page *page)
56 {
57 	int i;
58 	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
59 	/* Caller's responsibility to call global_flush_tlb() for
60 	 * performance reasons */
61 	return i;
62 }
63 EXPORT_SYMBOL_GPL(map_page_into_agp);
64 
65 int unmap_page_from_agp(struct page *page)
66 {
67 	int i;
68 	i = change_page_attr(page, 1, PAGE_KERNEL);
69 	/* Caller's responsibility to call global_flush_tlb() for
70 	 * performance reasons */
71 	return i;
72 }
73 EXPORT_SYMBOL_GPL(unmap_page_from_agp);
74 #endif
75 
76 /*
77  * Generic routines for handling agp_memory structures -
78  * They use the basic page allocation routines to do the brunt of the work.
79  */
80 
81 void agp_free_key(int key)
82 {
83 	if (key < 0)
84 		return;
85 
86 	if (key < MAXKEY)
87 		clear_bit(key, agp_bridge->key_list);
88 }
89 EXPORT_SYMBOL(agp_free_key);
90 
91 
92 static int agp_get_key(void)
93 {
94 	int bit;
95 
96 	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
97 	if (bit < MAXKEY) {
98 		set_bit(bit, agp_bridge->key_list);
99 		return bit;
100 	}
101 	return -1;
102 }
103 
104 
105 struct agp_memory *agp_create_memory(int scratch_pages)
106 {
107 	struct agp_memory *new;
108 
109 	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
110 	if (new == NULL)
111 		return NULL;
112 
113 	new->key = agp_get_key();
114 
115 	if (new->key < 0) {
116 		kfree(new);
117 		return NULL;
118 	}
119 	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
120 
121 	if (new->memory == NULL) {
122 		agp_free_key(new->key);
123 		kfree(new);
124 		return NULL;
125 	}
126 	new->num_scratch_pages = scratch_pages;
127 	return new;
128 }
129 EXPORT_SYMBOL(agp_create_memory);
130 
131 /**
132  *	agp_free_memory - free memory associated with an agp_memory pointer.
133  *
134  *	@curr:		agp_memory pointer to be freed.
135  *
136  *	It is the only function that can be called when the backend is not owned
137  *	by the caller.  (So it can free memory on client death.)
138  */
139 void agp_free_memory(struct agp_memory *curr)
140 {
141 	size_t i;
142 
143 	if (curr == NULL)
144 		return;
145 
146 	if (curr->is_bound == TRUE)
147 		agp_unbind_memory(curr);
148 
149 	if (curr->type != 0) {
150 		curr->bridge->driver->free_by_type(curr);
151 		return;
152 	}
153 	if (curr->page_count != 0) {
154 		for (i = 0; i < curr->page_count; i++) {
155 			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
156 		}
157 		flush_agp_mappings();
158 	}
159 	agp_free_key(curr->key);
160 	vfree(curr->memory);
161 	kfree(curr);
162 }
163 EXPORT_SYMBOL(agp_free_memory);
164 
165 #define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
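/*
 * For example, with 4 KiB pages and 8-byte unsigned longs (a typical 64-bit
 * build) ENTRIES_PER_PAGE is 512: each page of the vmalloc'ed new->memory
 * array holds 512 page addresses, so an allocation of 1000 AGP pages needs
 * scratch_pages = (1000 + 511) / 512 = 2 below.
 */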
166 
167 /**
168  *	agp_allocate_memory  -  allocate a group of pages of a certain type.
169  *
170  *	@page_count:	the number of pages to allocate (size_t)
171  *	@type:		the type of memory to be allocated (u32)
172  *
173  *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY, which
174  *	maps to physical RAM.  Any other type is device dependent.
175  *
176  *	It returns NULL whenever memory is unavailable.
177  */
178 struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
179 					size_t page_count, u32 type)
180 {
181 	int scratch_pages;
182 	struct agp_memory *new;
183 	size_t i;
184 
185 	if (!bridge)
186 		return NULL;
187 
188 	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
189 		return NULL;
190 
191 	if (type != 0) {
192 		new = bridge->driver->alloc_by_type(page_count, type);
193 		if (new)
194 			new->bridge = bridge;
195 		return new;
196 	}
197 
198 	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
199 
200 	new = agp_create_memory(scratch_pages);
201 
202 	if (new == NULL)
203 		return NULL;
204 
205 	for (i = 0; i < page_count; i++) {
206 		void *addr = bridge->driver->agp_alloc_page(bridge);
207 
208 		if (addr == NULL) {
209 			agp_free_memory(new);
210 			return NULL;
211 		}
212 		new->memory[i] = virt_to_gart(addr);
213 		new->page_count++;
214 	}
215 	new->bridge = bridge;
216 
217 	flush_agp_mappings();
218 
219 	return new;
220 }
221 EXPORT_SYMBOL(agp_allocate_memory);
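/*
 * A rough usage sketch (illustrative, not part of this driver): a chipset or
 * DRM driver that already holds a struct agp_bridge_data *bridge could
 * allocate four pages of ordinary memory (type 0) like this:
 *
 *	struct agp_memory *mem = agp_allocate_memory(bridge, 4, 0);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *
 * On success mem->page_count is 4 and mem->memory[] holds the GART/bus
 * addresses of the backing pages; the object is released again with
 * agp_free_memory().
 */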
222 
223 
224 /* End - Generic routines for handling agp_memory structures */
225 
226 
227 static int agp_return_size(void)
228 {
229 	int current_size;
230 	void *temp;
231 
232 	temp = agp_bridge->current_size;
233 
234 	switch (agp_bridge->driver->size_type) {
235 	case U8_APER_SIZE:
236 		current_size = A_SIZE_8(temp)->size;
237 		break;
238 	case U16_APER_SIZE:
239 		current_size = A_SIZE_16(temp)->size;
240 		break;
241 	case U32_APER_SIZE:
242 		current_size = A_SIZE_32(temp)->size;
243 		break;
244 	case LVL2_APER_SIZE:
245 		current_size = A_SIZE_LVL2(temp)->size;
246 		break;
247 	case FIXED_APER_SIZE:
248 		current_size = A_SIZE_FIX(temp)->size;
249 		break;
250 	default:
251 		current_size = 0;
252 		break;
253 	}
254 
255 	current_size -= (agp_memory_reserved / (1024*1024));
256 	if (current_size < 0)
257 		current_size = 0;
258 	return current_size;
259 }
260 
261 
262 int agp_num_entries(void)
263 {
264 	int num_entries;
265 	void *temp;
266 
267 	temp = agp_bridge->current_size;
268 
269 	switch (agp_bridge->driver->size_type) {
270 	case U8_APER_SIZE:
271 		num_entries = A_SIZE_8(temp)->num_entries;
272 		break;
273 	case U16_APER_SIZE:
274 		num_entries = A_SIZE_16(temp)->num_entries;
275 		break;
276 	case U32_APER_SIZE:
277 		num_entries = A_SIZE_32(temp)->num_entries;
278 		break;
279 	case LVL2_APER_SIZE:
280 		num_entries = A_SIZE_LVL2(temp)->num_entries;
281 		break;
282 	case FIXED_APER_SIZE:
283 		num_entries = A_SIZE_FIX(temp)->num_entries;
284 		break;
285 	default:
286 		num_entries = 0;
287 		break;
288 	}
289 
290 	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
291 	if (num_entries < 0)
292 		num_entries = 0;
293 	return num_entries;
294 }
295 EXPORT_SYMBOL_GPL(agp_num_entries);
296 
297 
298 /**
299  *	agp_copy_info  -  copy bridge state information
300  *
301  *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
302  *
303  *	This function copies information about the agp bridge device and the state of
304  *	the agp backend into an agp_kern_info pointer.
305  */
306 int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
307 {
308 	memset(info, 0, sizeof(struct agp_kern_info));
309 	if (!bridge) {
310 		info->chipset = NOT_SUPPORTED;
311 		return -EIO;
312 	}
313 
314 	info->version.major = bridge->version->major;
315 	info->version.minor = bridge->version->minor;
316 	info->chipset = SUPPORTED;
317 	info->device = bridge->dev;
318 	if (bridge->mode & AGPSTAT_MODE_3_0)
319 		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
320 	else
321 		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
322 	info->aper_base = bridge->gart_bus_addr;
323 	info->aper_size = agp_return_size();
324 	info->max_memory = bridge->max_memory_agp;
325 	info->current_memory = atomic_read(&bridge->current_memory_agp);
326 	info->cant_use_aperture = bridge->driver->cant_use_aperture;
327 	info->vm_ops = bridge->vm_ops;
328 	info->page_mask = ~0UL;
329 	return 0;
330 }
331 EXPORT_SYMBOL(agp_copy_info);
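/*
 * Illustrative sketch (not part of this file): a caller holding a valid
 * bridge pointer can snapshot the backend state like this; the printk is
 * only there to show which fields come back.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		printk(KERN_DEBUG "aperture at 0x%lx, %d MB\n",
 *		       info.aper_base, (int)info.aper_size);
 */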
332 
333 /* End - Routine to copy over information structure */
334 
335 /*
336  * Routines for handling swapping of agp_memory into the GATT -
337  * These routines take agp_memory and insert them into the GATT.
338  * They call device specific routines to actually write to the GATT.
339  */
340 
341 /**
342  *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
343  *
344  *	@curr:		agp_memory pointer
345  *	@pg_start:	an offset into the graphics aperture translation table
346  *
347  *	It returns -EINVAL if the pointer == NULL.
348  *	It returns -EBUSY if the area of the table requested is already in use.
349  */
350 int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
351 {
352 	int ret_val;
353 
354 	if (curr == NULL)
355 		return -EINVAL;
356 
357 	if (curr->is_bound == TRUE) {
358 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
359 		return -EINVAL;
360 	}
361 	if (curr->is_flushed == FALSE) {
362 		curr->bridge->driver->cache_flush();
363 		curr->is_flushed = TRUE;
364 	}
365 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
366 
367 	if (ret_val != 0)
368 		return ret_val;
369 
370 	curr->is_bound = TRUE;
371 	curr->pg_start = pg_start;
372 	return 0;
373 }
374 EXPORT_SYMBOL(agp_bind_memory);
375 
376 
377 /**
378  *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
379  *
380  * @curr:	agp_memory pointer to be removed from the GATT.
381  *
382  * It returns -EINVAL if this piece of agp_memory is not currently bound to
383  * the graphics aperture translation table or if the agp_memory pointer == NULL
384  */
385 int agp_unbind_memory(struct agp_memory *curr)
386 {
387 	int ret_val;
388 
389 	if (curr == NULL)
390 		return -EINVAL;
391 
392 	if (curr->is_bound != TRUE) {
393 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
394 		return -EINVAL;
395 	}
396 
397 	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
398 
399 	if (ret_val != 0)
400 		return ret_val;
401 
402 	curr->is_bound = FALSE;
403 	curr->pg_start = 0;
404 	return 0;
405 }
406 EXPORT_SYMBOL(agp_unbind_memory);
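/*
 * Putting the pieces together, the usual life cycle of a chunk of AGP memory
 * looks roughly like this (illustrative sketch; "bridge" is assumed to have
 * been acquired already and pg_start is whatever aperture offset, in pages,
 * the caller has reserved):
 *
 *	struct agp_memory *mem = agp_allocate_memory(bridge, 16, 0);
 *
 *	if (mem && agp_bind_memory(mem, pg_start) == 0) {
 *		... the pages are now visible through the aperture,
 *		    starting at GATT entry pg_start ...
 *		agp_unbind_memory(mem);
 *	}
 *	agp_free_memory(mem);
 *
 * agp_free_memory() copes with a NULL pointer and will unbind a still-bound
 * object itself, so the explicit unbind above is only for clarity.
 */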
407 
408 /* End - Routines for handling swapping of agp_memory into the GATT */
409 
410 
411 /* Generic Agp routines - Start */
412 static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
413 {
414 	u32 tmp;
415 
416 	if (*requested_mode & AGP2_RESERVED_MASK) {
417 		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
418 			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
419 		*requested_mode &= ~AGP2_RESERVED_MASK;
420 	}
421 
422 	/* Check the speed bits make sense. Only one should be set. */
423 	tmp = *requested_mode & 7;
424 	switch (tmp) {
425 		case 0:
426 			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
427 			*requested_mode |= AGPSTAT2_1X;
428 			break;
429 		case 1:
430 		case 2:
431 			break;
432 		case 3:
433 			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
434 			break;
435 		case 4:
436 			break;
437 		case 5:
438 		case 6:
439 		case 7:
440 			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
441 			break;
442 	}
443 
444 	/* disable SBA if it's not supported */
445 	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
446 		*bridge_agpstat &= ~AGPSTAT_SBA;
447 
448 	/* Set rate */
449 	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
450 		*bridge_agpstat &= ~AGPSTAT2_4X;
451 
452 	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
453 		*bridge_agpstat &= ~AGPSTAT2_2X;
454 
455 	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
456 		*bridge_agpstat &= ~AGPSTAT2_1X;
457 
458 	/* Now we know what mode it should be, clear out the unwanted bits. */
459 	if (*bridge_agpstat & AGPSTAT2_4X)
460 		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */
461 
462 	if (*bridge_agpstat & AGPSTAT2_2X)
463 		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */
464 
465 	if (*bridge_agpstat & AGPSTAT2_1X)
466 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */
467 
468 	/* Apply any errata. */
469 	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
470 		*bridge_agpstat &= ~AGPSTAT_FW;
471 
472 	if (agp_bridge->flags & AGP_ERRATA_SBA)
473 		*bridge_agpstat &= ~AGPSTAT_SBA;
474 
475 	if (agp_bridge->flags & AGP_ERRATA_1X) {
476 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
477 		*bridge_agpstat |= AGPSTAT2_1X;
478 	}
479 
480 	/* If we've dropped down to 1X, disable fast writes. */
481 	if (*bridge_agpstat & AGPSTAT2_1X)
482 		*bridge_agpstat &= ~AGPSTAT_FW;
483 }
484 
485 /*
486  * requested_mode = Mode requested by (typically) X.
487  * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
488  * vga_agpstat = PCI_AGP_STATUS from graphics card.
489  */
490 static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
491 {
492 	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
493 	u32 tmp;
494 
495 	if (*requested_mode & AGP3_RESERVED_MASK) {
496 		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
497 			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
498 		*requested_mode &= ~AGP3_RESERVED_MASK;
499 	}
500 
501 	/* Check the speed bits make sense. */
502 	tmp = *requested_mode & 7;
503 	if (tmp == 0) {
504 		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
505 		*requested_mode |= AGPSTAT3_4X;
506 	}
507 	if (tmp >= 3) {
508 		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
509 		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
510 	}
511 
512 	/* ARQSZ - Set the value to the maximum one.
513 	 * Don't allow the mode register to override values. */
514 	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
515 		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
516 
517 	/* Calibration cycle.
518 	 * Don't allow the mode register to override values. */
519 	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
520 		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
521 
522 	/* SBA *must* be supported for AGP v3 */
523 	*bridge_agpstat |= AGPSTAT_SBA;
524 
525 	/*
526 	 * Set speed.
527 	 * Check for invalid speeds. This can happen when applications
528 	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
529 	 */
530 	if (*requested_mode & AGPSTAT_MODE_3_0) {
531 		/*
532 		 * The caller doesn't know what it is doing: the bridge is in 3.0
533 		 * mode and has been passed a 3.0 mode, but with 2.x speed bits set.
534 		 * Convert AGP2.x 4x -> AGP3.0 4x.
535 		 */
536 		if (*requested_mode & AGPSTAT2_4X) {
537 			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
538 						current->comm, *requested_mode);
539 			*requested_mode &= ~AGPSTAT2_4X;
540 			*requested_mode |= AGPSTAT3_4X;
541 		}
542 	} else {
543 		/*
544 		 * The caller doesn't know what they are doing. We are in 3.0 mode,
545 		 * but have been passed an AGP 2.x mode.
546 		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
547 		 */
548 		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
549 					current->comm, *requested_mode);
550 		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
551 		*requested_mode |= AGPSTAT3_4X;
552 	}
553 
554 	if (*requested_mode & AGPSTAT3_8X) {
555 		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
556 			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
557 			*bridge_agpstat |= AGPSTAT3_4X;
558 			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
559 			return;
560 		}
561 		if (!(*vga_agpstat & AGPSTAT3_8X)) {
562 			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
563 			*bridge_agpstat |= AGPSTAT3_4X;
564 			printk(KERN_INFO PFX "%s requested AGPx8 but graphics card not capable.\n", current->comm);
565 			return;
566 		}
567 		/* All set, bridge & device can do AGP x8*/
568 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
569 		goto done;
570 
571 	} else if (*requested_mode & AGPSTAT3_4X) {
572 		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
573 		*bridge_agpstat |= AGPSTAT3_4X;
574 		goto done;
575 
576 	} else {
577 
578 		/*
579 		 * If no AGP mode was specified, see whether both
580 		 * the graphics card and the bridge can do x8, and use it if so.
581 		 * If not, fall back to x4 mode.
582 		 */
583 		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
584 			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
585 				"supported by bridge & card (x8).\n");
586 			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
587 			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
588 		} else {
589 			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
590 			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
591 				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
592 					*bridge_agpstat, origbridge);
593 				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
594 				*bridge_agpstat |= AGPSTAT3_4X;
595 			}
596 			if (!(*vga_agpstat & AGPSTAT3_8X)) {
597 				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
598 					*vga_agpstat, origvga);
599 				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
600 				*vga_agpstat |= AGPSTAT3_4X;
601 			}
602 		}
603 	}
604 
605 done:
606 	/* Apply any errata. */
607 	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
608 		*bridge_agpstat &= ~AGPSTAT_FW;
609 
610 	if (agp_bridge->flags & AGP_ERRATA_SBA)
611 		*bridge_agpstat &= ~AGPSTAT_SBA;
612 
613 	if (agp_bridge->flags & AGP_ERRATA_1X) {
614 		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
615 		*bridge_agpstat |= AGPSTAT2_1X;
616 	}
617 }
618 
619 
620 /**
621  * agp_collect_device_status - determine the correct agp_cmd from the various agp_stat values
622  * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
623  * @requested_mode: requested agp_stat from userspace (typically from X)
624  * @bridge_agpstat: current agp_stat from AGP bridge.
625  *
626  * This function will hunt for an AGP graphics card, and try to match
627  * the requested mode to the capabilities of both the bridge and the card.
628  */
629 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
630 {
631 	struct pci_dev *device = NULL;
632 	u32 vga_agpstat;
633 	u8 cap_ptr;
634 
635 	for (;;) {
636 		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
637 		if (!device) {
638 			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
639 			return 0;
640 		}
641 		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
642 		if (cap_ptr)
643 			break;
644 	}
645 
646 	/*
647 	 * OK, here we have an AGP device. Disable impossible
648 	 * settings, and adjust the read queue to the minimum.
649 	 */
650 	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
651 
652 	/* adjust RQ depth */
653 	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
654 	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
655 		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
656 
657 	/* disable FW if it's not supported */
658 	if (!((bridge_agpstat & AGPSTAT_FW) &&
659 		 (vga_agpstat & AGPSTAT_FW) &&
660 		 (requested_mode & AGPSTAT_FW)))
661 		bridge_agpstat &= ~AGPSTAT_FW;
662 
663 	/* Check to see if we are operating in 3.0 mode */
664 	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
665 		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
666 	else
667 		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
668 
669 	pci_dev_put(device);
670 	return bridge_agpstat;
671 }
672 EXPORT_SYMBOL(agp_collect_device_status);
673 
674 
675 void agp_device_command(u32 bridge_agpstat, int agp_v3)
676 {
677 	struct pci_dev *device = NULL;
678 	int mode;
679 
680 	mode = bridge_agpstat & 0x7;
681 	if (agp_v3)
682 		mode *= 4;
683 
684 	for_each_pci_dev(device) {
685 		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
686 		if (!agp)
687 			continue;
688 
689 		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
690 				agp_v3 ? 3 : 2, pci_name(device), mode);
691 		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
692 	}
693 }
694 EXPORT_SYMBOL(agp_device_command);
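/*
 * A note on the mode computation above: the low three status bits encode the
 * transfer rate, and AGP 3.0 scales them by four (bit value 1 means x4,
 * 2 means x8), hence the "mode *= 4" when agp_v3 is set.  The same bit
 * pattern on a 2.x bus would have meant x1/x2 instead.
 */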
695 
696 
697 void get_agp_version(struct agp_bridge_data *bridge)
698 {
699 	u32 ncapid;
700 
701 	/* Exit early if already set by errata workarounds. */
702 	if (bridge->major_version != 0)
703 		return;
704 
705 	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
706 	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
707 	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
708 }
709 EXPORT_SYMBOL(get_agp_version);
710 
711 
712 void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
713 {
714 	u32 bridge_agpstat, temp;
715 
716 	get_agp_version(agp_bridge);
717 
718 	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
719 				agp_bridge->major_version,
720 				agp_bridge->minor_version,
721 				pci_name(agp_bridge->dev));
722 
723 	pci_read_config_dword(agp_bridge->dev,
724 		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
725 
726 	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
727 	if (bridge_agpstat == 0)
728 		/* Something bad happened. FIXME: Return error code? */
729 		return;
730 
731 	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
732 
733 	/* Do AGP version specific frobbing. */
734 	if (bridge->major_version >= 3) {
735 		if (bridge->mode & AGPSTAT_MODE_3_0) {
736 			/* If we have 3.5, we can do the isoch stuff. */
737 			if (bridge->minor_version >= 5)
738 				agp_3_5_enable(bridge);
739 			agp_device_command(bridge_agpstat, TRUE);
740 			return;
741 		} else {
742 		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
743 		    bridge_agpstat &= ~(7<<10);
744 		    pci_read_config_dword(bridge->dev,
745 					bridge->capndx+AGPCTRL, &temp);
746 		    temp |= (1<<9);
747 		    pci_write_config_dword(bridge->dev,
748 					bridge->capndx+AGPCTRL, temp);
749 
750 		    printk(KERN_INFO PFX "Device is in legacy mode,"
751 				" falling back to 2.x\n");
752 		}
753 	}
754 
755 	/* AGP v<3 */
756 	agp_device_command(bridge_agpstat, FALSE);
757 }
758 EXPORT_SYMBOL(agp_generic_enable);
759 
760 
761 int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
762 {
763 	char *table;
764 	char *table_end;
765 	int size;
766 	int page_order;
767 	int num_entries;
768 	int i;
769 	void *temp;
770 	struct page *page;
771 
772 	/* The generic routines can't handle two-level GATTs */
773 	if (bridge->driver->size_type == LVL2_APER_SIZE)
774 		return -EINVAL;
775 
776 	table = NULL;
777 	i = bridge->aperture_size_idx;
778 	temp = bridge->current_size;
779 	size = page_order = num_entries = 0;
780 
781 	if (bridge->driver->size_type != FIXED_APER_SIZE) {
782 		do {
783 			switch (bridge->driver->size_type) {
784 			case U8_APER_SIZE:
785 				size = A_SIZE_8(temp)->size;
786 				page_order =
787 				    A_SIZE_8(temp)->page_order;
788 				num_entries =
789 				    A_SIZE_8(temp)->num_entries;
790 				break;
791 			case U16_APER_SIZE:
792 				size = A_SIZE_16(temp)->size;
793 				page_order = A_SIZE_16(temp)->page_order;
794 				num_entries = A_SIZE_16(temp)->num_entries;
795 				break;
796 			case U32_APER_SIZE:
797 				size = A_SIZE_32(temp)->size;
798 				page_order = A_SIZE_32(temp)->page_order;
799 				num_entries = A_SIZE_32(temp)->num_entries;
800 				break;
801 				/* This case will never really happen. */
802 			case FIXED_APER_SIZE:
803 			case LVL2_APER_SIZE:
804 			default:
805 				size = page_order = num_entries = 0;
806 				break;
807 			}
808 
809 			table = alloc_gatt_pages(page_order);
810 
811 			if (table == NULL) {
812 				i++;
813 				switch (bridge->driver->size_type) {
814 				case U8_APER_SIZE:
815 					bridge->current_size = A_IDX8(bridge);
816 					break;
817 				case U16_APER_SIZE:
818 					bridge->current_size = A_IDX16(bridge);
819 					break;
820 				case U32_APER_SIZE:
821 					bridge->current_size = A_IDX32(bridge);
822 					break;
823 				/* These cases will never really happen. */
824 				case FIXED_APER_SIZE:
825 				case LVL2_APER_SIZE:
826 				default:
827 					break;
828 				}
829 				temp = bridge->current_size;
830 			} else {
831 				bridge->aperture_size_idx = i;
832 			}
833 		} while (!table && (i < bridge->driver->num_aperture_sizes));
834 	} else {
835 		size = ((struct aper_size_info_fixed *) temp)->size;
836 		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
837 		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
838 		table = alloc_gatt_pages(page_order);
839 	}
840 
841 	if (table == NULL)
842 		return -ENOMEM;
843 
844 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
845 
846 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
847 		SetPageReserved(page);
848 
849 	bridge->gatt_table_real = (u32 *) table;
850 	agp_gatt_table = (void *)table;
851 
852 	bridge->driver->cache_flush();
853 	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
854 					(PAGE_SIZE * (1 << page_order)));
855 	bridge->driver->cache_flush();
856 
857 	if (bridge->gatt_table == NULL) {
858 		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
859 			ClearPageReserved(page);
860 
861 		free_gatt_pages(table, page_order);
862 
863 		return -ENOMEM;
864 	}
865 	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
866 
867 	/* AK: bogus, should encode addresses > 4GB */
868 	for (i = 0; i < num_entries; i++) {
869 		writel(bridge->scratch_page, bridge->gatt_table+i);
870 		readl(bridge->gatt_table+i);	/* PCI Posting. */
871 	}
872 
873 	return 0;
874 }
875 EXPORT_SYMBOL(agp_generic_create_gatt_table);
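/*
 * To make the sizing above concrete: with 4 KiB pages, a 64 MB aperture
 * needs 16384 GATT entries.  At four bytes per entry that is a 64 KiB
 * table, i.e. sixteen pages, which is why the matching entry in
 * agp3_generic_sizes below uses page_order 4 (1 << 4 pages).
 */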
876 
877 int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
878 {
879 	int page_order;
880 	char *table, *table_end;
881 	void *temp;
882 	struct page *page;
883 
884 	temp = bridge->current_size;
885 
886 	switch (bridge->driver->size_type) {
887 	case U8_APER_SIZE:
888 		page_order = A_SIZE_8(temp)->page_order;
889 		break;
890 	case U16_APER_SIZE:
891 		page_order = A_SIZE_16(temp)->page_order;
892 		break;
893 	case U32_APER_SIZE:
894 		page_order = A_SIZE_32(temp)->page_order;
895 		break;
896 	case FIXED_APER_SIZE:
897 		page_order = A_SIZE_FIX(temp)->page_order;
898 		break;
899 	case LVL2_APER_SIZE:
900 		/* The generic routines can't deal with two-level GATTs */
901 		return -EINVAL;
902 		break;
903 	default:
904 		page_order = 0;
905 		break;
906 	}
907 
908 	/* Do not worry about freeing memory, because if this is
909 	 * called, then all agp memory is deallocated and removed
910 	 * from the table. */
911 
912 	iounmap(bridge->gatt_table);
913 	table = (char *) bridge->gatt_table_real;
914 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
915 
916 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
917 		ClearPageReserved(page);
918 
919 	free_gatt_pages(bridge->gatt_table_real, page_order);
920 
921 	agp_gatt_table = NULL;
922 	bridge->gatt_table = NULL;
923 	bridge->gatt_table_real = NULL;
924 	bridge->gatt_bus_addr = 0;
925 
926 	return 0;
927 }
928 EXPORT_SYMBOL(agp_generic_free_gatt_table);
929 
930 
931 int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
932 {
933 	int num_entries;
934 	size_t i;
935 	off_t j;
936 	void *temp;
937 	struct agp_bridge_data *bridge;
938 
939 	bridge = mem->bridge;
940 	if (!bridge)
941 		return -EINVAL;
942 
943 	temp = bridge->current_size;
944 
945 	switch (bridge->driver->size_type) {
946 	case U8_APER_SIZE:
947 		num_entries = A_SIZE_8(temp)->num_entries;
948 		break;
949 	case U16_APER_SIZE:
950 		num_entries = A_SIZE_16(temp)->num_entries;
951 		break;
952 	case U32_APER_SIZE:
953 		num_entries = A_SIZE_32(temp)->num_entries;
954 		break;
955 	case FIXED_APER_SIZE:
956 		num_entries = A_SIZE_FIX(temp)->num_entries;
957 		break;
958 	case LVL2_APER_SIZE:
959 		/* The generic routines can't deal with two-level GATTs */
960 		return -EINVAL;
961 		break;
962 	default:
963 		num_entries = 0;
964 		break;
965 	}
966 
967 	num_entries -= agp_memory_reserved / PAGE_SIZE;
968 	if (num_entries < 0) num_entries = 0;
969 
970 	if (type != 0 || mem->type != 0) {
971 		/* The generic routines know nothing of memory types */
972 		return -EINVAL;
973 	}
974 
975 	/* AK: could wrap */
976 	if ((pg_start + mem->page_count) > num_entries)
977 		return -EINVAL;
978 
979 	j = pg_start;
980 
981 	while (j < (pg_start + mem->page_count)) {
982 		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
983 			return -EBUSY;
984 		j++;
985 	}
986 
987 	if (mem->is_flushed == FALSE) {
988 		bridge->driver->cache_flush();
989 		mem->is_flushed = TRUE;
990 	}
991 
992 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
993 		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
994 		readl(bridge->gatt_table+j);	/* PCI Posting. */
995 	}
996 
997 	bridge->driver->tlb_flush(mem);
998 	return 0;
999 }
1000 EXPORT_SYMBOL(agp_generic_insert_memory);
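/*
 * For example (assuming 4 KiB pages): inserting an agp_memory object with
 * page_count = 256 at pg_start = 16 fills GATT entries 16..271, so the
 * pages become visible in the aperture starting 64 KiB (16 * 4 KiB) from
 * its base and covering the next 1 MB.
 */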
1001 
1002 
1003 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1004 {
1005 	size_t i;
1006 	struct agp_bridge_data *bridge;
1007 
1008 	bridge = mem->bridge;
1009 	if (!bridge)
1010 		return -EINVAL;
1011 
1012 	if (type != 0 || mem->type != 0) {
1013 		/* The generic routines know nothing of memory types */
1014 		return -EINVAL;
1015 	}
1016 
1017 	/* AK: bogus, should encode addresses > 4GB */
1018 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1019 		writel(bridge->scratch_page, bridge->gatt_table+i);
1020 		readl(bridge->gatt_table+i);	/* PCI Posting. */
1021 	}
1022 
1023 	global_cache_flush();
1024 	bridge->driver->tlb_flush(mem);
1025 	return 0;
1026 }
1027 EXPORT_SYMBOL(agp_generic_remove_memory);
1028 
1029 
1030 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
1031 {
1032 	return NULL;
1033 }
1034 EXPORT_SYMBOL(agp_generic_alloc_by_type);
1035 
1036 
1037 void agp_generic_free_by_type(struct agp_memory *curr)
1038 {
1039 	vfree(curr->memory);
1040 	agp_free_key(curr->key);
1041 	kfree(curr);
1042 }
1043 EXPORT_SYMBOL(agp_generic_free_by_type);
1044 
1045 
1046 /*
1047  * Basic Page Allocation Routines -
1048  * These routines handle page allocation and by default they reserve the allocated
1049  * memory.  They also handle incrementing the current_memory_agp value, which is checked
1050  * against a maximum value.
1051  */
1052 
1053 void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1054 {
1055 	struct page * page;
1056 
1057 	page = alloc_page(GFP_KERNEL | GFP_DMA32);
1058 	if (page == NULL)
1059 		return NULL;
1060 
1061 	map_page_into_agp(page);
1062 
1063 	get_page(page);
1064 	SetPageLocked(page);
1065 	atomic_inc(&agp_bridge->current_memory_agp);
1066 	return page_address(page);
1067 }
1068 EXPORT_SYMBOL(agp_generic_alloc_page);
1069 
1070 
1071 void agp_generic_destroy_page(void *addr)
1072 {
1073 	struct page *page;
1074 
1075 	if (addr == NULL)
1076 		return;
1077 
1078 	page = virt_to_page(addr);
1079 	unmap_page_from_agp(page);
1080 	put_page(page);
1081 	unlock_page(page);
1082 	free_page((unsigned long)addr);
1083 	atomic_dec(&agp_bridge->current_memory_agp);
1084 }
1085 EXPORT_SYMBOL(agp_generic_destroy_page);
1086 
1087 /* End Basic Page Allocation Routines */
1088 
1089 
1090 /**
1091  * agp_enable  -  initialise the agp point-to-point connection.
1092  * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
1093  * @mode:	agp mode register value to configure with.
1094  */
1095 void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1096 {
1097 	if (!bridge)
1098 		return;
1099 	bridge->driver->agp_enable(bridge, mode);
1100 }
1101 EXPORT_SYMBOL(agp_enable);
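/*
 * Illustrative sketch (not from this file): a typical caller reads the
 * current mode with agp_copy_info(), tweaks it, and hands it back, e.g.
 *
 *	agp_enable(bridge, info.mode | AGPSTAT_FW);
 *
 * which lands in agp_generic_enable() (or a chipset-specific agp_enable
 * hook) where the request is reconciled against the bridge and card
 * capabilities.
 */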
1102 
1103 /* Once the global variable agp_bridge has been removed from all drivers,
1104  * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
1105  */
1106 
1107 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1108 {
1109 	if (list_empty(&agp_bridges))
1110 		return NULL;
1111 
1112 	return agp_bridge;
1113 }
1114 
1115 static void ipi_handler(void *null)
1116 {
1117 	flush_agp_cache();
1118 }
1119 
1120 void global_cache_flush(void)
1121 {
1122 	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
1123 		panic(PFX "timed out waiting for the other CPUs!\n");
1124 }
1125 EXPORT_SYMBOL(global_cache_flush);
1126 
1127 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1128 	unsigned long addr, int type)
1129 {
1130 	/* memory type is ignored in the generic routine */
1131 	if (bridge->driver->masks)
1132 		return addr | bridge->driver->masks[0].mask;
1133 	else
1134 		return addr;
1135 }
1136 EXPORT_SYMBOL(agp_generic_mask_memory);
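/*
 * As an illustration: a driver whose masks[0].mask were 0x00000001 (a
 * hypothetical "entry valid" bit) would turn bus address 0x12345000 into
 * the GATT entry value 0x12345001 here; drivers without a masks table get
 * the address back unchanged.
 */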
1137 
1138 /*
1139  * These functions are implemented according to the AGPv3 spec,
1140  * which covers implementation details that had previously been
1141  * left open.
1142  */
1143 
1144 int agp3_generic_fetch_size(void)
1145 {
1146 	u16 temp_size;
1147 	int i;
1148 	struct aper_size_info_16 *values;
1149 
1150 	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1151 	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1152 
1153 	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1154 		if (temp_size == values[i].size_value) {
1155 			agp_bridge->previous_size =
1156 				agp_bridge->current_size = (void *) (values + i);
1157 
1158 			agp_bridge->aperture_size_idx = i;
1159 			return values[i].size;
1160 		}
1161 	}
1162 	return 0;
1163 }
1164 EXPORT_SYMBOL(agp3_generic_fetch_size);
1165 
1166 void agp3_generic_tlbflush(struct agp_memory *mem)
1167 {
1168 	u32 ctrl;
1169 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1170 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
1171 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
1172 }
1173 EXPORT_SYMBOL(agp3_generic_tlbflush);
1174 
1175 int agp3_generic_configure(void)
1176 {
1177 	u32 temp;
1178 	struct aper_size_info_16 *current_size;
1179 
1180 	current_size = A_SIZE_16(agp_bridge->current_size);
1181 
1182 	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1183 	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1184 
1185 	/* set aperture size */
1186 	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1187 	/* set gart pointer */
1188 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1189 	/* enable aperture and GTLB */
1190 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1191 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1192 	return 0;
1193 }
1194 EXPORT_SYMBOL(agp3_generic_configure);
1195 
1196 void agp3_generic_cleanup(void)
1197 {
1198 	u32 ctrl;
1199 	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1200 	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1201 }
1202 EXPORT_SYMBOL(agp3_generic_cleanup);
1203 
1204 struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1205 {
1206 	{4096, 1048576, 10,0x000},
1207 	{2048,  524288, 9, 0x800},
1208 	{1024,  262144, 8, 0xc00},
1209 	{ 512,  131072, 7, 0xe00},
1210 	{ 256,   65536, 6, 0xf00},
1211 	{ 128,   32768, 5, 0xf20},
1212 	{  64,   16384, 4, 0xf30},
1213 	{  32,    8192, 3, 0xf38},
1214 	{  16,    4096, 2, 0xf3c},
1215 	{   8,    2048, 1, 0xf3e},
1216 	{   4,    1024, 0, 0xf3f}
1217 };
1218 EXPORT_SYMBOL(agp3_generic_sizes);
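/*
 * Reading the table above: each entry is {aperture size in MB, number of
 * GATT entries, GATT page order, AGPAPSIZE register value}.  With 4 KiB
 * pages the first two columns are tied together: a 256 MB aperture, for
 * instance, maps 256 * 1024 / 4 = 65536 pages and so needs 65536 entries,
 * held in a 256 KiB (order 6, i.e. 64-page) table.
 */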
1219 
1220