#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

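/*
 * Kernel pte backing the first kmap_atomic fixmap slot, cached once at
 * boot by kmap_init() below.
 */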
static pte_t *kmap_pte;

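/*
 * First and last pfn of the highmem range; set up while the early
 * memory map is being initialised.
 */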
unsigned long highstart_pfn, highend_pfn;

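/*
 * Map a page into the kernel's permanent kmap area.  This may sleep
 * while waiting for a free slot, so it must never be called from
 * interrupt or other atomic context; lowmem pages already have a
 * permanent mapping and are returned directly.
 */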
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

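/*
 * Undo a __kmap().  Dropping the last reference may take the kmap pool
 * lock, so this must not run in interrupt context -- the BUG_ON()
 * enforces that.
 */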
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */

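/*
 * A minimal sketch of a caller, for illustration only -- "dst",
 * "src_page" and "len" are assumed to exist in the (hypothetical)
 * caller, and nothing may sleep between the two calls:
 *
 *	void *src = __kmap_atomic(src_page, KM_USER0);
 *	memcpy(dst, src, len);
 *	__kunmap_atomic(src, KM_USER0);
 */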
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

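/*
 * Tear an atomic kmap down again.  With CONFIG_DEBUG_HIGHMEM the pte is
 * cleared and the TLB entry flushed so that a stale use of the slot
 * faults immediately; otherwise the slot is simply left to be
 * overwritten by the next __kmap_atomic().
 */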
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

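/*
 * Hypothetical kmap_atomic_pfn() use, for illustration only -- "pfn" is
 * assumed to name a page frame that has no struct page:
 *
 *	u32 *p = kmap_atomic_pfn(pfn, KM_USER0);
 *	u32 val = *p;
 *	__kunmap_atomic(p, KM_USER0);
 */

/*
 * Translate an address produced by an atomic kmap back to the struct
 * page backing it; lowmem addresses fall through to virt_to_page().
 */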
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

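/*
 * Run once at boot, after the fixmap page tables have been set up, to
 * cache the pte that backs the first kmap fixmap slot.
 */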
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
131