/* arch/x86/mm/highmem_32.c (revision bcefe12eff5dca6fdfa94ed85e5bee66380d5cd9) */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

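/*
 * Illustrative sketch, not part of the original file: a typical
 * kmap()/kunmap() pair copying a (possibly highmem) page into a
 * lowmem buffer.  kmap() may sleep, so this is process context
 * only; the helper name and "buf" destination are hypothetical.
 *
 *	static void copy_page_to_buf(struct page *page, void *buf)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memcpy(buf, vaddr, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */
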
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

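/*
 * Illustrative sketch, not part of the original file: a typical
 * kmap_atomic()/kunmap_atomic() pair.  The mapping must be dropped
 * before sleeping, and the same km_type slot (KM_USER0 here) must
 * be passed to both calls; the helper name is hypothetical.
 *
 *	static void zero_page_atomic(struct page *page)
 *	{
 *		void *vaddr = kmap_atomic(page, KM_USER0);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(vaddr, KM_USER0);
 *	}
 */
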
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

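/*
 * Illustrative sketch, not part of the original file: mapping a raw
 * page frame (e.g. device memory with no struct page) by pfn.  The
 * pfn source and helper name are hypothetical; as above, the mapping
 * is atomic and must not be held across a sleep.
 *
 *	static u32 read_word_at_pfn(unsigned long pfn, unsigned long off)
 *	{
 *		void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *		u32 val = *(u32 *)(vaddr + off);
 *
 *		kunmap_atomic(vaddr, KM_USER0);
 *		return val;
 *	}
 */
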
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

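/*
 * Illustrative sketch, not part of the original file: recovering the
 * struct page behind an address returned by kmap_atomic().  Lowmem
 * addresses fall through to virt_to_page(); fixmap addresses are
 * resolved through the per-cpu kmap pte slots.
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *	struct page *same = kmap_atomic_to_page(vaddr);	  (same == page)
 *	kunmap_atomic(vaddr, KM_USER0);
 */
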
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}