pgtable_32.c: comparison of revision 36d99df2fb474222ab47fbe8ae7385661033223b (left column of the original view) against revision 4f76cd382213b29dd3658e3e1ea47c0c2be06f3c (right column).
Both revisions begin identically; the viewer hides 159 unchanged lines between the includes and the function tail shown here:

```c
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>

/* --- 159 unchanged lines hidden --- */

{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
```
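As a quick sanity check on the arithmetic in the visible function tail above (it lowers __FIXADDR_TOP and grows __VMALLOC_RESERVE by `reserve`), here is a minimal userspace sketch; the 256 MB reserve value is an assumption chosen purely for illustration, not something taken from the diff:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reserve = 0x10000000u;	/* hypothetical: reserve 256 MB */
	uint32_t page_size = 0x1000u;	/* i386 PAGE_SIZE, 4 KiB */

	/* mirrors: printk(... "above 0x%08x\n", (int)-reserve) */
	printf("Reserving virtual address space above 0x%08x\n", -reserve);

	/* mirrors: __FIXADDR_TOP = -reserve - PAGE_SIZE */
	printf("__FIXADDR_TOP would become 0x%08x\n", -reserve - page_size);

	return 0;
}
```

With that input it prints 0xf0000000 for the reserved boundary and 0xeffff000 for the new __FIXADDR_TOP, i.e. the fixmap area moves down by the reservation plus one extra page.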
The following block is present only in revision 36d99df2 (left column); revision 4f76cd38 drops it from pgtable_32.c entirely:

```c
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif	/* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
```
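The removed pgd_ctor()/pgd_prepopulate_pmd()/pgd_mop_up_pmds() trio encodes two ideas: a freshly allocated page directory inherits the kernel half by copying entries from swapper_pg_dir, and (on PAE) every top-level slot is populated eagerly, with a mop-up pass rolling back a partially built directory when an allocation fails. The sketch below is only a userspace analogue of that copy-then-allocate-or-roll-back pattern, assuming invented names and sizes (fake_pgd_alloc, PTRS_PER_FAKE_PGD, malloc-backed "pmds"); none of it is kernel API.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PTRS_PER_FAKE_PGD	8	/* pretend the top level has 8 slots */
#define USER_FAKE_PTRS		4	/* lower half: per-process entries   */

static void *template_pgd[PTRS_PER_FAKE_PGD];	/* stands in for swapper_pg_dir */

/* like pgd_mop_up_pmds(): free any "pmds" still attached to the user half */
static void fake_mop_up(void **pgd)
{
	for (int i = 0; i < USER_FAKE_PTRS; i++) {
		free(pgd[i]);
		pgd[i] = NULL;
	}
}

static void **fake_pgd_alloc(void)
{
	void **pgd = calloc(PTRS_PER_FAKE_PGD, sizeof(*pgd));

	if (!pgd)
		return NULL;

	/* like pgd_ctor(): share the kernel half by copying the template */
	memcpy(pgd + USER_FAKE_PTRS, template_pgd + USER_FAKE_PTRS,
	       (PTRS_PER_FAKE_PGD - USER_FAKE_PTRS) * sizeof(*pgd));

	/* like pgd_prepopulate_pmd(): populate the user half eagerly,
	   rolling everything back if any single allocation fails */
	for (int i = 0; i < USER_FAKE_PTRS; i++) {
		pgd[i] = calloc(1, 64);		/* stand-in for pmd_alloc_one() */
		if (!pgd[i]) {
			fake_mop_up(pgd);
			free(pgd);
			return NULL;
		}
	}
	return pgd;
}

/* like pgd_free(): mop up, then release the directory itself */
static void fake_pgd_free(void **pgd)
{
	fake_mop_up(pgd);
	free(pgd);
}

int main(void)
{
	void **pgd = fake_pgd_alloc();

	printf("fake pgd %s\n", pgd ? "allocated" : "allocation failed");
	if (pgd)
		fake_pgd_free(pgd);
	return 0;
}
```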
The remainder of the file is identical in both revisions (starting at line 363 in 36d99df2 and line 176 in 4f76cd38):

```c
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

/* --- 15 unchanged lines hidden --- */
```