/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff:
 * huge mappings are always HPAGE_SIZE-aligned, so every alias of a
 * page automatically lands on the same cache color.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
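	/* Ask vm_unmapped_area() for an HPAGE_SIZE-aligned result: the
	 * mask covers the address bits between PAGE_SHIFT and
	 * HPAGE_SHIFT.
	 */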
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

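	/* A page-aligned value is a real address; low bits set means
	 * vm_unmapped_area() returned an errno.  For a 64-bit task,
	 * retry in the region above the sparc64 VA hole.
	 */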
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

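/* Arch hook for hugetlb mappings (HAVE_ARCH_HUGETLB_UNMAPPED_AREA):
 * validate length and limits, honor MAP_FIXED and any address hint,
 * then fall through to the bottom-up or top-down helper depending on
 * which mmap layout this mm uses.
 */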
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

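	/* Walk, allocating as needed, down to the pte level.  A huge
	 * page is backed by a run of ordinary ptes here, so a normal
	 * pte table is allocated and its first sub-pte returned.
	 */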
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

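/* Non-allocating counterpart of huge_pte_alloc(): walk the page
 * tables and return the first sub-pte of the huge mapping, or NULL
 * if some level is not populated.
 */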
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

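/* Hugetlb PMD sharing is not implemented on sparc, so there is never
 * anything to unshare; returning 0 makes the generic code proceed
 * with a normal unmap.
 */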
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

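/* Install a huge mapping by writing out every sub-pte.  The
 * huge_pte_count bookkeeping below is consulted by the sparc64 TSB
 * code when deciding whether this address space needs a huge-page
 * TSB.
 */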
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
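	/* Each sub-pte maps the next PAGE_SIZE piece of the huge page.
	 * The physical address sits above the low flag bits in the pte
	 * value, so adding PAGE_SIZE per iteration advances the target
	 * page without disturbing the flags.
	 */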
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

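/* Tear-down counterpart of set_huge_pte_at(): snapshot the first
 * sub-pte as the huge pte value, then clear the entire run.
 */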
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

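/* Huge pages are realized here as runs of normal ptes, never as huge
 * PMD or PUD entries, so these predicates always report false.
 */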
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}