xref: /linux/drivers/char/agp/intel-gtt.c (revision f8324e20f8289dffc646d64366332e05eaacab25)
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * an agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphic devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore, just needlessly complicates
12  * the code. But as long as the old graphics stack is still supported, it's stuck
13  * here.
14  *
15  * /fairy-tale-mode off
16  */
17 
18 /*
19  * If we have Intel graphics, we're not going to have anything other than
20  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21  * on the Intel IOMMU support (CONFIG_DMAR).
22  * Only newer chipsets need to bother with this, of course.
23  */
24 #ifdef CONFIG_DMAR
25 #define USE_PCI_DMA_API 1
26 #endif
27 
/* i810 fixed aperture modes: {aperture size in MB, GTT entries, page order}. */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
34 
35 #define AGP_DCACHE_MEMORY	1
36 #define AGP_PHYS_MEMORY		2
37 #define INTEL_AGP_CACHED_MEMORY 3
38 
/* PTE bit masks indexed by mask type (see intel_i830_type_to_mask_type):
 * 0 = normal valid PTE, AGP_DCACHE_MEMORY = i810 local/dcache memory,
 * INTEL_AGP_CACHED_MEMORY = snooped system-cached PTE. */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
47 
/* Driver-global state shared by all chipset variants (single instance). */
static struct _intel_private {
	struct pci_dev *pcidev;	/* device one */
	u8 __iomem *registers;	/* GMCH MMIO register window */
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;	/* i810 dedicated video RAM entries (0 if none) */
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory.  Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;			/* i830+ */
	int gtt_total_size;	/* total number of GTT entries (i9xx) */
	union {
		/* chipset flush page: ioremapped on i9xx, kmapped on i8xx */
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;	/* backing page for i8xx_flush_page */
	struct resource ifp_resource;	/* i9xx flush page MMIO resource */
	int resource_valid;	/* nonzero when ifp_resource must be released */
} intel_private;
68 
69 #ifdef USE_PCI_DMA_API
70 static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71 {
72 	*ret = pci_map_page(intel_private.pcidev, page, 0,
73 			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74 	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75 		return -EINVAL;
76 	return 0;
77 }
78 
/* Undo intel_agp_map_page() for a single page's DMA mapping. */
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
84 
/*
 * Free the scatterlist attached to mem.  A temporary sg_table is
 * reconstructed on the stack because sg_free_table() needs the table
 * wrapper, while agp_memory only stores the raw sgl pointer.
 */
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}
97 
/*
 * Build a scatterlist covering mem->pages and DMA-map it.
 * On success mem->sg_list/num_sg are populated and 0 is returned;
 * on failure the sglist is freed again and -ENOMEM is returned.
 * Note pci_map_sg() may coalesce entries, so num_sg can be smaller
 * than page_count (handled in intel_agp_insert_sg_entries()).
 */
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		return -ENOMEM;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg)) {
		intel_agp_free_sglist(mem);
		return -ENOMEM;
	}
	return 0;
}
122 
/* DMA-unmap and free the scatterlist built by intel_agp_map_memory(). */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
131 
/*
 * Write the DMA addresses of mem's scatterlist into the GTT starting at
 * entry pg_start.  When pci_map_sg() did not coalesce (num_sg ==
 * page_count), each sg entry maps exactly one page; otherwise merged
 * entries are split back into per-page PTEs.  The trailing readl()
 * forces the posted MMIO writes out.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		/* one sg entry per page: straight copy */
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
					intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
								       sg_dma_address(sg) + m * PAGE_SIZE,
								       mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);	/* PCI posting. */
}
167 
168 #else
169 
/*
 * Non-DMA-API variant: write physical page addresses straight into the
 * GTT starting at entry pg_start.  Sandybridge PTEs are forced to the
 * system-cached (snooped) type.  Callers guarantee page_count > 0, so
 * the final posting readl() stays inside the written range.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;
	u32 cache_bits = 0;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
	{
		cache_bits = I830_PTE_SYSTEM_CACHED;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);	/* PCI posting. */
}
190 
191 #endif
192 
193 static int intel_i810_fetch_size(void)
194 {
195 	u32 smram_miscc;
196 	struct aper_size_info_fixed *values;
197 
198 	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200 
201 	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202 		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203 		return 0;
204 	}
205 	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206 		agp_bridge->current_size = (void *) (values + 1);
207 		agp_bridge->aperture_size_idx = 1;
208 		return values[1].size;
209 	} else {
210 		agp_bridge->current_size = (void *) (values);
211 		agp_bridge->aperture_size_idx = 0;
212 		return values[0].size;
213 	}
214 
215 	return 0;
216 }
217 
/*
 * Program the i810 for GTT use: map the MMIO register window (if not
 * already mapped), detect dedicated video RAM, enable the page table,
 * and point every PTE at the scratch page when one is required.
 * Returns 0 on success or -ENOMEM if the register ioremap fails.
 */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;	/* MMIO base is 512K-aligned */

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	/* SDRAM in DRAM row 0 indicates 4MB of dedicated video memory */
	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	/* enable the page table at the GATT bus address */
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* park every GTT entry on the scratch page */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
259 
/* Disable the page table and release the MMIO register mapping. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
266 
267 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
268 {
269 	return;
270 }
271 
272 /* Exists to support ARGB cursors */
/* Exists to support ARGB cursors */
/*
 * Allocate a physically-contiguous order-2 block (4 pages) below 4GB,
 * marked uncached for hardware access.  The extra get_page() keeps a
 * reference that i8xx_destroy_pages() drops again.  Returns NULL on
 * allocation or set_pages_uc() failure.
 */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		/* restore caching before giving the pages back */
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
290 
/*
 * Release a 4-page block from i8xx_alloc_pages(): restore write-back
 * caching, drop the extra reference, and free the order-2 allocation.
 */
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
301 
302 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
303 					int type)
304 {
305 	if (type < AGP_USER_TYPES)
306 		return type;
307 	else if (type == AGP_USER_CACHED_MEMORY)
308 		return INTEL_AGP_CACHED_MEMORY;
309 	else
310 		return 0;
311 }
312 
/*
 * Bind mem's pages into the i810 GTT at pg_start.
 *
 * Returns 0 on success (including the trivial page_count == 0 case),
 * -EBUSY if any target entry is already in use, or -EINVAL for range
 * overflow, type mismatch, or an unsupported mask type.  Note the
 * error path still marks mem->is_flushed, matching the success path.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* refuse to overwrite entries that are already bound */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* dcache entries map the i810's dedicated video RAM linearly */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));	/* PCI posting */
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
374 
/*
 * Unbind mem's pages from the i810 GTT by pointing the affected
 * entries back at the scratch page.  Always succeeds.
 */
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */

	return 0;
}
390 
391 /*
392  * The i810/i830 requires a physical address to program its mouse
393  * pointer into hardware.
394  * However the Xserver still writes to it through the agp aperture.
395  */
396 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
397 {
398 	struct agp_memory *new;
399 	struct page *page;
400 
401 	switch (pg_count) {
402 	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
403 		break;
404 	case 4:
405 		/* kludge to get 4 physical pages for ARGB cursor */
406 		page = i8xx_alloc_pages();
407 		break;
408 	default:
409 		return NULL;
410 	}
411 
412 	if (page == NULL)
413 		return NULL;
414 
415 	new = agp_create_memory(pg_count);
416 	if (new == NULL)
417 		return NULL;
418 
419 	new->pages[0] = page;
420 	if (pg_count == 4) {
421 		/* kludge to get 4 physical pages for ARGB cursor */
422 		new->pages[1] = new->pages[0] + 1;
423 		new->pages[2] = new->pages[1] + 1;
424 		new->pages[3] = new->pages[2] + 1;
425 	}
426 	new->page_count = pg_count;
427 	new->num_scratch_pages = pg_count;
428 	new->type = AGP_PHYS_MEMORY;
429 	new->physical = page_to_phys(new->pages[0]);
430 	return new;
431 }
432 
/*
 * Allocate agp_memory of a driver-specific type.  AGP_DCACHE_MEMORY
 * requests must cover exactly the dedicated video RAM (no backing
 * pages are needed, so the page array is freed); AGP_PHYS_MEMORY is
 * delegated to alloc_agpphysmem_i8xx().  Other types return NULL.
 */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		/* dcache memory has no backing pages to track */
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}
455 
/*
 * Free agp_memory allocated by intel_i810_alloc_by_type(), releasing
 * the backing pages for AGP_PHYS_MEMORY (4-page cursor blocks go
 * through i8xx_destroy_pages(), single pages through the bridge's
 * destroy hook in unmap-then-free order).
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
472 
/* Build a PTE value: OR the type's valid/cache bits into the address. */
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
479 
/* i830+ fixed aperture modes: {aperture size in MB, GTT entries, page order}. */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
488 
/*
 * Work out how many GTT entries are already consumed by BIOS-stolen
 * memory and store the result in intel_private.gtt_entries (in units
 * of 4KB pages).  Two pieces of hardware state feed this:
 *  - the GTT size itself ("size", in KB), which on some generations
 *    is carved out of stolen memory and must be subtracted, and
 *  - the stolen-memory size field of the GMCH control register, whose
 *    encoding differs per chipset generation.
 */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
	/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;	/* BIOS popup */
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting.  However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* i830/845G stolen-memory encoding */
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* dedicated (local) RDRAM instead of stolen memory */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		/* i855 and later common encoding; some values are only
		 * valid on specific generations, checked below. */
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
		       gtt_entries / KB(1), local ? "local" : "stolen");
		/* convert bytes to 4KB GTT entries */
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
		       "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
725 
/* Tear down the i8xx chipset flush page: unmap and free its page. */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
735 
/*
 * Allocate and kmap the page used by intel_i830_chipset_flush().
 * Best-effort: on failure the flush page stays NULL and chipset
 * flushing is simply unavailable.
 */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}
750 
751 /* The chipset_flush interface needs to get data that has already been
752  * flushed out of the CPU all the way out to main memory, because the GPU
753  * doesn't snoop those buffers.
754  *
755  * The 8xx series doesn't have the same lovely interface for flushing the
756  * chipset write buffers that the later chips do. According to the 865
757  * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
758  * that buffer out, we just fill 1KB and clflush it out, on the assumption
759  * that it'll push whatever was in there out.  It appears to work.
760  */
761 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
762 {
763 	unsigned int *pg = intel_private.i8xx_flush_page;
764 
765 	memset(pg, 0, 1024);
766 
767 	if (cpu_has_clflush)
768 		clflush_cache_range(pg, 1024);
769 	else if (wbinvd_on_all_cpus() != 0)
770 		printk(KERN_ERR "Timed out waiting for cache flush.\n");
771 }
772 
773 /* The intel i830 automatically initializes the agp aperture during POST.
774  * Use the memory already set aside for in the GTT.
775  */
/*
 * "Create" the GATT for i830-class hardware: the BIOS already built
 * the page table in stolen memory during POST, so we only map the MMIO
 * registers, read the existing table's bus address from PGETBL_CTL and
 * count the stolen-memory entries.  Returns 0 or -ENOMEM.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;	/* no table of our own */

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;	/* MMIO base is 512K-aligned */

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	/* PGETBL_CTL holds the BIOS-programmed table address */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
807 
808 /* Return the gatt table to a sane state. Use the top of stolen
809  * memory for the GTT.
810  */
/* Nothing to free: the GATT lives in BIOS-stolen memory, not ours. */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
815 
816 static int intel_i830_fetch_size(void)
817 {
818 	u16 gmch_ctrl;
819 	struct aper_size_info_fixed *values;
820 
821 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
822 
823 	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
824 	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
825 		/* 855GM/852GM/865G has 128MB aperture size */
826 		agp_bridge->current_size = (void *) values;
827 		agp_bridge->aperture_size_idx = 0;
828 		return values[0].size;
829 	}
830 
831 	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
832 
833 	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
834 		agp_bridge->current_size = (void *) values;
835 		agp_bridge->aperture_size_idx = 0;
836 		return values[0].size;
837 	} else {
838 		agp_bridge->current_size = (void *) (values + 1);
839 		agp_bridge->aperture_size_idx = 1;
840 		return values[1].size;
841 	}
842 
843 	return 0;
844 }
845 
/*
 * Program the i830 for GTT use: latch the aperture base, enable the
 * GMCH, enable the page table, park all non-stolen entries on the
 * scratch page, and set up the chipset flush page.  Always returns 0.
 */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* skip entries backed by stolen memory */
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
877 
/* Release the MMIO register mapping taken in create_gatt_table(). */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
882 
/*
 * Bind mem's pages into the i830 GTT at pg_start.
 *
 * Inserting below intel_private.gtt_entries (the stolen-memory range)
 * is rejected.  Unlike the i810 path there is no busy check: the GTT
 * is effectively write-only here, so the caller owns offset decisions.
 * Returns 0 on success, -EINVAL on range/type errors; the error path
 * still marks mem->is_flushed, matching the success path.
 */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));	/* PCI posting */

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
939 
/*
 * Unbind mem's pages from the i830 GTT by restoring the scratch page.
 * Refuses to touch the stolen-memory range; returns 0 or -EINVAL.
 */
static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting */

	return 0;
}
961 
962 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
963 {
964 	if (type == AGP_PHYS_MEMORY)
965 		return alloc_agpphysmem_i8xx(pg_count, type);
966 	/* always return NULL for other allocation types for now */
967 	return NULL;
968 }
969 
970 static int intel_alloc_chipset_flush_resource(void)
971 {
972 	int ret;
973 	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
974 				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
975 				     pcibios_align_resource, agp_bridge->dev);
976 
977 	return ret;
978 }
979 
980 static void intel_i915_setup_chipset_flush(void)
981 {
982 	int ret;
983 	u32 temp;
984 
985 	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
986 	if (!(temp & 0x1)) {
987 		intel_alloc_chipset_flush_resource();
988 		intel_private.resource_valid = 1;
989 		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
990 	} else {
991 		temp &= ~1;
992 
993 		intel_private.resource_valid = 1;
994 		intel_private.ifp_resource.start = temp;
995 		intel_private.ifp_resource.end = temp + PAGE_SIZE;
996 		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
997 		/* some BIOSes reserve this area in a pnp some don't */
998 		if (ret)
999 			intel_private.resource_valid = 0;
1000 	}
1001 }
1002 
1003 static void intel_i965_g33_setup_chipset_flush(void)
1004 {
1005 	u32 temp_hi, temp_lo;
1006 	int ret;
1007 
1008 	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1009 	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1010 
1011 	if (!(temp_lo & 0x1)) {
1012 
1013 		intel_alloc_chipset_flush_resource();
1014 
1015 		intel_private.resource_valid = 1;
1016 		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1017 			upper_32_bits(intel_private.ifp_resource.start));
1018 		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1019 	} else {
1020 		u64 l64;
1021 
1022 		temp_lo &= ~0x1;
1023 		l64 = ((u64)temp_hi << 32) | temp_lo;
1024 
1025 		intel_private.resource_valid = 1;
1026 		intel_private.ifp_resource.start = l64;
1027 		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1028 		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1029 		/* some BIOSes reserve this area in a pnp some don't */
1030 		if (ret)
1031 			intel_private.resource_valid = 0;
1032 	}
1033 }
1034 
/*
 * One-time setup of the i9xx chipset flush page: pick the generation-
 * specific IFPADDR handler, then ioremap the resulting MMIO page.
 * Best-effort — on failure i9xx_flush_page stays NULL and flushing is
 * unavailable.  Sandybridge needs no flush page at all.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start) {
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
		if (!intel_private.i9xx_flush_page)
			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
	}
}
1061 
1062 static int intel_i9xx_configure(void)
1063 {
1064 	struct aper_size_info_fixed *current_size;
1065 	u32 temp;
1066 	u16 gmch_ctrl;
1067 	int i;
1068 
1069 	current_size = A_SIZE_FIX(agp_bridge->current_size);
1070 
1071 	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1072 
1073 	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1074 
1075 	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1076 	gmch_ctrl |= I830_GMCH_ENABLED;
1077 	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1078 
1079 	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1080 	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
1081 
1082 	if (agp_bridge->driver->needs_scratch_page) {
1083 		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1084 			writel(agp_bridge->scratch_page, intel_private.gtt+i);
1085 		}
1086 		readl(intel_private.gtt+i-1);	/* PCI Posting. */
1087 	}
1088 
1089 	global_cache_flush();
1090 
1091 	intel_i9xx_setup_flush();
1092 
1093 	return 0;
1094 }
1095 
1096 static void intel_i915_cleanup(void)
1097 {
1098 	if (intel_private.i9xx_flush_page)
1099 		iounmap(intel_private.i9xx_flush_page);
1100 	if (intel_private.resource_valid)
1101 		release_resource(&intel_private.ifp_resource);
1102 	intel_private.ifp_resource.start = 0;
1103 	intel_private.resource_valid = 0;
1104 	iounmap(intel_private.gtt);
1105 	iounmap(intel_private.registers);
1106 }
1107 
/*
 * Trigger a chipset flush by writing to the dedicated flush page, if one
 * was mapped during setup.  The bridge argument is unused; the flush page
 * lives in the file-scope intel_private state.
 */
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
1113 
1114 static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1115 				     int type)
1116 {
1117 	int num_entries;
1118 	void *temp;
1119 	int ret = -EINVAL;
1120 	int mask_type;
1121 
1122 	if (mem->page_count == 0)
1123 		goto out;
1124 
1125 	temp = agp_bridge->current_size;
1126 	num_entries = A_SIZE_FIX(temp)->num_entries;
1127 
1128 	if (pg_start < intel_private.gtt_entries) {
1129 		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1130 			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1131 			   pg_start, intel_private.gtt_entries);
1132 
1133 		dev_info(&intel_private.pcidev->dev,
1134 			 "trying to insert into local/stolen memory\n");
1135 		goto out_err;
1136 	}
1137 
1138 	if ((pg_start + mem->page_count) > num_entries)
1139 		goto out_err;
1140 
1141 	/* The i915 can't check the GTT for entries since it's read only;
1142 	 * depend on the caller to make the correct offset decisions.
1143 	 */
1144 
1145 	if (type != mem->type)
1146 		goto out_err;
1147 
1148 	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1149 
1150 	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1151 	    mask_type != INTEL_AGP_CACHED_MEMORY)
1152 		goto out_err;
1153 
1154 	if (!mem->is_flushed)
1155 		global_cache_flush();
1156 
1157 	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1158 
1159  out:
1160 	ret = 0;
1161  out_err:
1162 	mem->is_flushed = true;
1163 	return ret;
1164 }
1165 
1166 static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1167 				     int type)
1168 {
1169 	int i;
1170 
1171 	if (mem->page_count == 0)
1172 		return 0;
1173 
1174 	if (pg_start < intel_private.gtt_entries) {
1175 		dev_info(&intel_private.pcidev->dev,
1176 			 "trying to disable local/stolen memory\n");
1177 		return -EINVAL;
1178 	}
1179 
1180 	for (i = pg_start; i < (mem->page_count + pg_start); i++)
1181 		writel(agp_bridge->scratch_page, intel_private.gtt+i);
1182 
1183 	readl(intel_private.gtt+i-1);
1184 
1185 	return 0;
1186 }
1187 
1188 /* Return the aperture size by just checking the resource length.  The effect
1189  * described in the spec of the MSAC registers is just changing of the
1190  * resource size.
1191  */
1192 static int intel_i9xx_fetch_size(void)
1193 {
1194 	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1195 	int aper_size; /* size in megabytes */
1196 	int i;
1197 
1198 	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1199 
1200 	for (i = 0; i < num_sizes; i++) {
1201 		if (aper_size == intel_i830_sizes[i].size) {
1202 			agp_bridge->current_size = intel_i830_sizes + i;
1203 			return aper_size;
1204 		}
1205 	}
1206 
1207 	return 0;
1208 }
1209 
/*
 * Determine the GTT (page table) size, returned in bytes via KB().
 *
 * On G33 the size is encoded in the GMCH control register.  On earlier
 * chipsets the GTT is just large enough to map the aperture: one 4-byte
 * PTE per 4KB page, so a GTT of aperture-size-in-MB kilobytes -- which is
 * what fetch_size() (reporting MB) feeds into KB() below.
 */
static int intel_i915_get_gtt_size(void)
{
	int size;	/* GTT size in KB */

	if (IS_G33) {
		u16 gmch_ctrl;

		/* G33's GTT size defined in gmch_ctrl */
		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			/* Unknown encoding: warn and fall back to the
			 * smallest supported table. */
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size();
	}

	return KB(size);
}
1241 
1242 /* The intel i915 automatically initializes the agp aperture during POST.
1243  * Use the memory already set aside for in the GTT.
1244  */
1245 static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1246 {
1247 	int page_order;
1248 	struct aper_size_info_fixed *size;
1249 	int num_entries;
1250 	u32 temp, temp2;
1251 	int gtt_map_size;
1252 
1253 	size = agp_bridge->current_size;
1254 	page_order = size->page_order;
1255 	num_entries = size->num_entries;
1256 	agp_bridge->gatt_table_real = NULL;
1257 
1258 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1259 	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1260 
1261 	gtt_map_size = intel_i915_get_gtt_size();
1262 
1263 	intel_private.gtt = ioremap(temp2, gtt_map_size);
1264 	if (!intel_private.gtt)
1265 		return -ENOMEM;
1266 
1267 	intel_private.gtt_total_size = gtt_map_size / 4;
1268 
1269 	temp &= 0xfff80000;
1270 
1271 	intel_private.registers = ioremap(temp, 128 * 4096);
1272 	if (!intel_private.registers) {
1273 		iounmap(intel_private.gtt);
1274 		return -ENOMEM;
1275 	}
1276 
1277 	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1278 	global_cache_flush();	/* FIXME: ? */
1279 
1280 	/* we have to call this as early as possible after the MMIO base address is known */
1281 	intel_i830_init_gtt_entries();
1282 
1283 	agp_bridge->gatt_table = NULL;
1284 
1285 	agp_bridge->gatt_bus_addr = temp;
1286 
1287 	return 0;
1288 }
1289 
1290 /*
1291  * The i965 supports 36-bit physical addresses, but to keep
1292  * the format of the GTT the same, the bits that don't fit
1293  * in a 32-bit word are shifted down to bits 4..7.
1294  *
1295  * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1296  * is always zero on 32-bit architectures, so no need to make
1297  * this conditional.
1298  */
1299 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1300 					    dma_addr_t addr, int type)
1301 {
1302 	/* Shift high bits down */
1303 	addr |= (addr >> 28) & 0xf0;
1304 
1305 	/* Type checking must be done elsewhere */
1306 	return addr | bridge->driver->masks[type].mask;
1307 }
1308 
/*
 * Report where the GTT lives inside the MMIO BAR (*gtt_offset) and how
 * large it is in bytes (*gtt_size), both of which vary by chipset
 * generation.
 */
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		/* G4x/Ironlake: a 2MB GTT located 2MB into the BAR. */
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		/* Sandybridge encodes the GTT size in the GMCH control word. */
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		/* Everything older (i965-class): 512KB GTT at offset 512KB. */
		*gtt_offset = *gtt_size = KB(512);
	}
}
1349 
1350 /* The intel i965 automatically initializes the agp aperture during POST.
1351  * Use the memory already set aside for in the GTT.
1352  */
1353 static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1354 {
1355 	int page_order;
1356 	struct aper_size_info_fixed *size;
1357 	int num_entries;
1358 	u32 temp;
1359 	int gtt_offset, gtt_size;
1360 
1361 	size = agp_bridge->current_size;
1362 	page_order = size->page_order;
1363 	num_entries = size->num_entries;
1364 	agp_bridge->gatt_table_real = NULL;
1365 
1366 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1367 
1368 	temp &= 0xfff00000;
1369 
1370 	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1371 
1372 	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1373 
1374 	if (!intel_private.gtt)
1375 		return -ENOMEM;
1376 
1377 	intel_private.gtt_total_size = gtt_size / 4;
1378 
1379 	intel_private.registers = ioremap(temp, 128 * 4096);
1380 	if (!intel_private.registers) {
1381 		iounmap(intel_private.gtt);
1382 		return -ENOMEM;
1383 	}
1384 
1385 	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1386 	global_cache_flush();   /* FIXME: ? */
1387 
1388 	/* we have to call this as early as possible after the MMIO base address is known */
1389 	intel_i830_init_gtt_entries();
1390 
1391 	agp_bridge->gatt_table = NULL;
1392 
1393 	agp_bridge->gatt_bus_addr = temp;
1394 
1395 	return 0;
1396 }
1397 
/* agp_bridge_driver ops table for i810-family chipsets. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
1423 
/* agp_bridge_driver ops table for i830-family chipsets. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
1450 
/* agp_bridge_driver ops table for i915-family chipsets. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* DMA-API mapping hooks, only when an IOMMU may be present. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1483 
/* agp_bridge_driver ops table for i965-family chipsets (36-bit PTEs). */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* DMA-API mapping hooks, only when an IOMMU may be present. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1516 
/* agp_bridge_driver ops table for G33-family chipsets. */
static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	/* DMA-API mapping hooks, only when an IOMMU may be present. */
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1549