memory.c: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (before) vs e0da382c92626ad1d7f4b7527d19b80104d67a83 (after)
Common context (unchanged in both):

/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of

--- 96 unchanged lines hidden ---

	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
Before (1da177e4):

static inline void clear_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	if (!((addr | end) & ~PMD_MASK)) {
		/* Only free fully aligned ranges */
		struct page *page = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		tlb->mm->nr_ptes--;
		pte_free_tlb(tlb, page);
	}
}

After (e0da382c):

static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, page);
	dec_page_state(nr_page_table_pages);
	tlb->mm->nr_ptes--;
}
Before (1da177e4):

static inline void clear_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;
	pmd_t *empty_pmd = NULL;

	pmd = pmd_offset(pud, addr);

	/* Only free fully aligned ranges */
	if (!((addr | end) & ~PUD_MASK))
		empty_pmd = pmd;
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		clear_pte_range(tlb, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	if (empty_pmd) {
		pud_clear(pud);
		pmd_free_tlb(tlb, empty_pmd);
	}
}

After (e0da382c):

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
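The heart of the change at this level is the condition under which the pmd page table itself is freed: the old code only freed it when handed an exactly pud-aligned range, the new code frees it whenever the whole pud span around the range is known to lie between floor and ceiling. A minimal user-space sketch of the two conditions follows; PUD_SPAN, old_would_free and new_would_free are made-up names and values purely for illustration (the real masks come from the architecture's page-table headers), and only the decision logic is mirrored, not the actual freeing.

#include <stdio.h>

/* Illustrative value only, not from any real configuration: one pud entry spans 1 GB. */
#define PUD_SPAN	(1UL << 30)
#define PUD_MASK	(~(PUD_SPAN - 1))

/* Old rule: the pmd table is freed only when [addr, end) is exactly one aligned pud span. */
static int old_would_free(unsigned long addr, unsigned long end)
{
	return !((addr | end) & ~PUD_MASK);
}

/*
 * New rule (sketch of the tail of free_pmd_range): the pmd table is freed
 * whenever the whole pud span around the range lies inside [floor, ceiling),
 * where ceiling 0 stands for the top of the address space.
 */
static int new_would_free(unsigned long addr, unsigned long end,
			  unsigned long floor, unsigned long ceiling)
{
	unsigned long start = addr & PUD_MASK;

	if (start < floor)
		return 0;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return 0;
	}
	if (end - 1 > ceiling - 1)
		return 0;
	return 1;
}

int main(void)
{
	/* A small mapping in the middle of a pud span. */
	unsigned long addr = 3 * PUD_SPAN + (64UL << 10);
	unsigned long end  = addr + (128UL << 10);

	printf("old: %d\n", old_would_free(addr, end));		/* 0: never freed */
	printf("new: %d\n", new_would_free(addr, end, 0, 0));	/* 1: no neighbour shares the span */
	printf("new: %d\n", new_would_free(addr, end, 0, end + (4UL << 10)));
							/* 0: a vma just above still uses this span */
	return 0;
}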
Before (1da177e4):

static inline void clear_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;
	pud_t *empty_pud = NULL;

	pud = pud_offset(pgd, addr);

	/* Only free fully aligned ranges */
	if (!((addr | end) & ~PGDIR_MASK))
		empty_pud = pud;
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		clear_pmd_range(tlb, pud, addr, next);
	} while (pud++, addr = next, addr != end);

	if (empty_pud) {
		pgd_clear(pgd);
		pud_free_tlb(tlb, empty_pud);
	}
}

After (e0da382c):

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
Before (1da177e4):

/*
 * This function clears user-level page tables of a process.
 * Unlike other pagetable walks, some memory layouts might give end 0.
 * Must be called with pagetable lock held.
 */
void clear_page_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		clear_pud_range(tlb, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

After (e0da382c):

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!tlb_is_full_mm(tlb))
		flush_tlb_pgtables(tlb->mm, start, end);
}
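The "- 1" idiom that the comment above describes is easy to misread, so here is a minimal user-space sketch of just that comparison; range_above_ceiling is a made-up name and the addresses are arbitrary, this is not kernel code.

#include <stdio.h>

/*
 * end == 0 and ceiling == 0 stand for the very top of the address space,
 * so comparing (value - 1) lets the unsigned wrap turn 0 into ULONG_MAX
 * and the test still comes out right.
 */
static int range_above_ceiling(unsigned long end, unsigned long ceiling)
{
	return end - 1 > ceiling - 1;
}

int main(void)
{
	unsigned long end = 0x00007fff00000000UL;

	/* ceiling 0 means "top of address space": nothing can be above it */
	printf("%d\n", range_above_ceiling(end, 0));				/* 0 */
	/* a real, lower ceiling: this end does poke above it */
	printf("%d\n", range_above_ceiling(end, 0x0000600000000000UL));	/* 1 */
	/* the naive comparison would get the ceiling == 0 case backwards */
	printf("%d\n", end > 0UL);						/* 1, i.e. the wrong answer */
	return 0;
}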
Added in e0da382c (no counterpart before):

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/* Optimization: gather nearby vmas into a single call down */
		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
			vma = next;
			next = vma->vm_next;
		}
		free_pgd_range(*tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		vma = next;
	}
}
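To see what the gathering loop and the per-walk ceiling actually produce, here is a small user-space trace of the same control flow. struct vma, PMD_SPAN and trace_free_pgtables are illustrative stand-ins, not the kernel's definitions; the 2 MB span is an assumption made only for this example.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's vma list: just enough to trace the loop. */
struct vma {
	unsigned long vm_start, vm_end;
	struct vma *vm_next;
};

#define PMD_SPAN	(1UL << 21)	/* assumed 2 MB pmd span, for illustration only */

/*
 * Mirror of the gathering loop in free_pgtables() above: nearby vmas are
 * batched into one walk, and each walk's ceiling is the next vma's start
 * (or the caller's ceiling for the last batch).
 */
static void trace_free_pgtables(struct vma *vma,
				unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vma *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		while (next && next->vm_start <= vma->vm_end + PMD_SPAN) {
			vma = next;
			next = vma->vm_next;
		}
		printf("walk [%#lx, %#lx) floor %#lx ceiling %#lx\n",
		       addr, vma->vm_end, floor, next ? next->vm_start : ceiling);
		vma = next;
	}
}

int main(void)
{
	/* Two nearby vmas (gathered into one walk) and one far away. */
	struct vma c = { 0x700000000000UL, 0x700000200000UL, NULL };
	struct vma b = { 0x400100000UL,    0x400200000UL,    &c };
	struct vma a = { 0x400000000UL,    0x400040000UL,    &b };

	trace_free_pgtables(&a, 0, 0);
	return 0;
}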
Common context (unchanged in both):

pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	if (!pmd_present(*pmd)) {
		struct page *new;

		spin_unlock(&mm->page_table_lock);
		new = pte_alloc_one(mm, address);
		spin_lock(&mm->page_table_lock);

--- 1962 unchanged lines hidden ---