/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  In non-PAE mode the middle level is
 * folded into the pgd, so this simply returns the pmd corresponding to the
 * given pgd entry.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * the given middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}
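/*
 * Illustrative sketch only, not part of the original file: how the two
 * helpers above compose to map a single low-memory pfn into the temporary
 * tables.  The helper name resume_map_one_pfn() is made up for this
 * example, and the block is kept under #if 0 so it is never compiled.
 */
#if 0
static int resume_map_one_pfn(pgd_t *pgd_base, unsigned long pfn)
{
	/* Virtual address the pfn becomes visible at, above PAGE_OFFSET */
	unsigned long vaddr = (unsigned long)__va(pfn << PAGE_SHIFT);
	pgd_t *pgd = pgd_base + pgd_index(vaddr);
	pmd_t *pmd;
	pte_t *pte;

	/* Walk (or allocate) the middle level on a resume-safe page */
	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	/* Walk (or allocate) the page table itself, then set the pte */
	pte = resume_one_page_table_init(pmd + pmd_index(vaddr));
	if (!pte)
		return -ENOMEM;

	set_pte(pte + pte_index(vaddr), pfn_pte(pfn, PAGE_KERNEL_EXEC));
	return 0;
}
#endif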
/*
 * Map low physical memory (max_low_pfn pages) into the kernel virtual
 * address space, by creating page tables starting from address PAGE_OFFSET.
 * The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here.
			 */
			if (cpu_has_pse) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have enough memory; from this point on we cannot recover. */
	restore_image();
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
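/*
 * Worked example (hypothetical numbers, for illustration only): with 4 KiB
 * pages, __nosave_begin at physical 0x00212345 and __nosave_end at
 * 0x00213001, pfn_is_nosave() reports pfns 0x212 and 0x213 as nosave.
 * The begin pfn is rounded down by the shift and the end pfn is rounded up
 * by PAGE_ALIGN(), so pages that only partly overlap the nosave section are
 * still kept out of the hibernation image.
 */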